```
from ahrs.filters.complementary import Complementary
from ahrs import Quaternion
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from mango.tuner import Tuner
import time
import pickle
```
## Import Sample Head-Pose Data:
```
acc = pd.read_csv('Acc.csv',header=None).to_numpy()
gyro = pd.read_csv('Gyro.csv',header=None).to_numpy()
Or_GT = pd.read_csv('Or_GT.csv',header=None).to_numpy()
Euler_GT = np.zeros((Or_GT.shape[0],3))
for i in range(Or_GT.shape[0]):
    cur_quat = Quaternion(Or_GT[i,:])
    Euler_GT[i,:] = cur_quat.to_angles()
plt.plot(Euler_GT[:,1]*57.2958,label='Elevation')
plt.plot(Euler_GT[:,2]*57.2958,label='Azimuth')
plt.xlabel('Sample Count')
plt.ylabel('Rotation (deg)')
plt.legend()
plt.title('Ground Truth Orientation')
plt.show()
```
## Filter Performance (No tuning):
```
act_OR = Complementary(gyr=gyro, acc=acc,frequency=100,gain=0.002).Q
Euler_act = np.zeros((act_OR.shape[0],3))
for i in range(act_OR.shape[0]):
    cur_quat = Quaternion(act_OR[i,:])
    Euler_act[i,:] = cur_quat.to_angles()
total_error = (np.mean(np.abs(Euler_GT[:,2]-Euler_act[:,2]))*57.2958) + (np.mean(np.abs(Euler_GT[:,1]-Euler_act[:,1]))*57.2958)
min_az_error = np.mean(np.abs(Euler_GT[:,2]-Euler_act[:,2]))*57.2958
min_ele_error = np.mean(np.abs(Euler_GT[:,1]-Euler_act[:,1]))*57.2958
print('MAE:',np.mean((min_az_error,min_ele_error)))
print('Azimuth Error: ',min_az_error)
print('Elevation Error: ',min_ele_error)
plt.plot(Euler_act[:,1]*57.2958,label='Elevation')
plt.plot(Euler_act[:,2]*57.2958,label='Azimuth')
plt.xlabel('Sample Count')
plt.ylabel('Rotation (deg)')
plt.legend()
plt.title('Untuned Filter Orientation')
plt.show()
```
## Filter Performance (Tuned):
```
def objective_NN(gain = 0):
    act_OR = Complementary(gyr=gyro, acc=acc,frequency=100,gain=gain).Q
    Euler_act = np.zeros((act_OR.shape[0],3))
    for i in range(act_OR.shape[0]):
        cur_quat = Quaternion(act_OR[i,:])
        Euler_act[i,:] = cur_quat.to_angles()
    total_error = (np.mean(np.abs(Euler_GT[:,2]-Euler_act[:,2]))*57.2958) + (np.mean(np.abs(Euler_GT[:,1]-Euler_act[:,1]))*57.2958)
    return -total_error
def save_res(data, file_name):
    pickle.dump( data, open( file_name, "wb" ) )
param_dict = {
'gain_list': np.arange(0,1,0.005)
}
def objfunc(args_list):
    objective_evaluated = []
    start_time = time.time()
    for hyper_par in args_list:
        gain_list = hyper_par['gain_list']
        objective = objective_NN(gain = gain_list)
        objective_evaluated.append(objective)
        end_time = time.time()
        print('objective:', objective, ' time:',end_time-start_time)
    return objective_evaluated
conf_Dict = dict()
conf_Dict['batch_size'] = 1
conf_Dict['num_iteration'] = 100
conf_Dict['initial_random']= 5
tuner = Tuner(param_dict, objfunc,conf_Dict)
all_runs = []
results = tuner.maximize()
all_runs.append(results)
save_res(all_runs,'Complementary_log.p')
act_OR = Complementary(gyr=gyro, acc=acc,frequency=100,gain=results['best_params']['gain_list']).Q
Euler_act = np.zeros((act_OR.shape[0],3))
for i in range(act_OR.shape[0]):
    cur_quat = Quaternion(act_OR[i,:])
    Euler_act[i,:] = cur_quat.to_angles()
total_error = (np.mean(np.abs(Euler_GT[:,2]-Euler_act[:,2]))*57.2958) + (np.mean(np.abs(Euler_GT[:,1]-Euler_act[:,1]))*57.2958)
min_az_error = np.mean(np.abs(Euler_GT[:,2]-Euler_act[:,2]))*57.2958
min_ele_error = np.mean(np.abs(Euler_GT[:,1]-Euler_act[:,1]))*57.2958
print('MAE:',np.mean((min_az_error,min_ele_error)))
print('Azimuth Error: ',min_az_error)
print('Elevation Error: ',min_ele_error)
plt.plot(Euler_GT[:,2]*57.2958,label='GT')
plt.plot(Euler_act[:,2]*57.2958,label='Predicted')
plt.xlabel('Sample Count')
plt.ylabel('Rotation (deg)')
plt.legend()
plt.title('Azimuth Head-Pose (Tuned)')
plt.show()
plt.plot(Euler_GT[:,1]*57.2958,label='GT')
plt.plot(Euler_act[:,1]*57.2958,label='Predicted')
plt.xlabel('Sample Count')
plt.ylabel('Rotation (deg)')
plt.legend()
plt.title('Elevation Head-Pose (Tuned)')
plt.show()
```
Oftentimes in machine learning, our primary goal is to predict data labels, rather than numerical values. Predicting labels is called *classification* (as opposed to predicting a continuous value, which is called *regression*). However, the fundamental principles are the same: define a measure of misfit (or data likelihood, in the probabilistic context) and a model that allows the prediction of a label subject to some parameter choices, then tune those parameter choices to optimize the predictive accuracy with respect to a training dataset. As one might expect, however, the choices of model and misfit are often different.
As an example dataset, we'll be using the [Iris flower data set](https://en.wikipedia.org/wiki/Iris_flower_data_set), which was first introduced by Ronald A. Fisher in 1936. It provides four features: petal length, petal width, sepal length, and sepal width, along with a class label that divides the data into three classes: *Iris setosa, Iris virginica and Iris versicolor*. It has since become a classic for testing machine learning algorithms, and is available in scikit-learn. Let's import it and have a look:
```
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = [15,12]
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Load the dataset
iris = datasets.load_iris()
X = iris.data # n x m matrix of features
y = iris.target # n vector of classes
X,X_test,y,y_test = train_test_split(X,y,test_size=0.33,random_state=42) # Split into 33% test and 67% training sets
print(X.shape)
classes = [0,1,2] #Numeric class identifiers: 0 -> Setosa, 1-> Versicolor, 2-> Virginica
# 2D plots of all possible combinations of 2 features out of 4 produces (4 choose 2) = 6 plots
fig,axs = plt.subplots(nrows=4,ncols=4)
for i in range(4):
    for j in range(4):
        if i>j:
            axs[i,j].scatter(X[:,i],X[:,j],c=y)
            axs[i,j].set_xlabel(iris['feature_names'][i])
            axs[i,j].set_ylabel(iris['feature_names'][j])
        else: # delete redundant plots
            fig.delaxes(axs[i,j])
plt.show()
```
Here, we're going to construct a simple but surprisingly effective classifier for the iris dataset based on Bayes' rule. This method is called naive Bayes, for reasons that will become clear. The method works in a two-step process: first, for a set of classes $C_k \in \{C_1,C_2,\ldots,C_N\}$, where $N$ is the number of possible class labels (3 in this case), use Bayes' rule to generate a probability that a data instance with a set of features $\mathbf{x}$ belongs to each class, given a set of training data $D=(X,y)$. In step two, assign the class label with the highest probability to that data instance. For each class $C_k$, we can use Bayes' rule to determine the probability:
$$
P(C_k|\mathbf{x},D) \propto P(\mathbf{x}|D,C_k) P(C_k|D).
$$
However, this equation implies that we have to compute the joint probability of $\mathbf{x}$, where there could be some complicated correlations between features. We can simplify this model by assuming that all of the features ($x_j \in \mathbf{x}$) are conditionally independent given the class, or
$$ P(\mathbf{x}|D,C_k) \approx \prod_{j=1}^n P(x_j|D,C_k), $$
where $n$ is the number of features in a data instance. This is the so-called *naive Bayes* assumption, naive because it assumes no correlation between features. This leads to the simpler model:
$$
P(C_k|\mathbf{x},D) \propto P(C_k|D) \prod_{j=1}^n P(x_j|D,C_k).
$$
Now, we need to specify the distributions for the class prior $P(C_k|D)$ and class likelihoods $P(x_j|D,C_k)$. We can model the prior probability as a categorical distribution over the $N$ classes
$$P(C=C_k|D) = \eta_k, $$
where $\eta_k$ is the probability that we would label the feature vector $\mathbf{x}$ as class $C_k$, before having seen any of the values in $\mathbf{x}$. There are different options here. We could say that all classes are equally likely, so $\eta_k=1/N$. A more common practice is to estimate $\eta_k$ as the maximum likelihood value from the training data $D$, which for a categorical distribution is
$$
\eta_k = \frac{1}{m}\sum_{i=1}^m [C_i = C_k],
$$
or the total proportion of the training data where the label is $C_k$. Note the use of the [Iverson Bracket](https://en.wikipedia.org/wiki/Iverson_bracket) in the above definition.
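To make the Iverson bracket concrete, here is a minimal sketch of the prior estimate on a small hypothetical label vector (`y_demo` is made up for illustration, not the iris labels):
```
import numpy as np
# The Iverson bracket [C_i = C_k] is just a boolean test, so the maximum
# likelihood prior is the fraction of training labels equal to C_k.
y_demo = np.array([0, 0, 1, 2, 2, 2])                          # hypothetical labels
eta = np.array([np.mean(y_demo == C_k) for C_k in [0, 1, 2]])
print(eta)                                                     # roughly [0.333, 0.167, 0.5]
```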
For the likelihood, since we're considering continuous features, it makes sense to use a continous likelihood function. While there are many options, a normal distribution should do the trick. However, we need to use the training data to estimate the parameters of this normal distribution. This is similar to what we did for the prior:
$$
P(x_j|C_k,D) = \int P(x_j|\mu_j,\sigma_j^2)\, P(\mu_j|C_k,D) \, P(\sigma_j^2|C_k,D) \;\mathrm{d}\mu_j\, \mathrm{d}\sigma_j^2.
$$
Assigning complete probability distributions to $\mu_j$ and $\sigma_j^2$ is the Bayesian thing to do, but leads to some complicated math and perhaps no analytical solution. Instead, let's do what we did before and compute $\mu_j$ and $\sigma_j^2$ using maximum likelihood estimators, which for normally-distributed data are given by
$$
\mu_{j,MLE} = \frac{\sum_{i=1}^m x_{j,i} [C_i=C_k]}{\sum_{i=1}^m [C_i=C_k]},
$$
and
$$
\sigma_{j,MLE}^2 = \frac{\sum_{i=1}^m (x_{j,i} - \mu_{j,MLE})^2 [C_i=C_k]}{\left(\sum_{i=1}^m [C_i=C_k]\right) - 1},
$$
which are just the sample mean and sample variance of the training data examples for which the class is $C_k$ (here the sums run over the $m$ training instances; strictly speaking, the $-1$ in the denominator gives the unbiased sample variance rather than the exact MLE, but the difference is negligible). As a brief aside, setting the parameter values in this way corresponds to assuming that
$$
P(\mu_j|C_k,D) = \delta(\mu_j - \mu_{j,MLE}(C_k,D))
$$
and
$$
P(\sigma_j^2|C_k,D) = \delta(\sigma_j^2 - \sigma_{j,MLE}^2(C_k,D)),
$$
where $\delta$ is the [Dirac delta function](https://en.wikipedia.org/wiki/Dirac_delta_function). This makes the integrals trivial and the likelihood function becomes:
$$
P(x_j|C_k,D) = \mathcal{N}(x_j|\mu_{j,MLE},\sigma_{j,MLE}^2).
$$
We can implement this in just a few lines in Python. Begin by defining some count variables
```
m = X.shape[0] # Number of data instances
m_test = X_test.shape[0] # Number of test data instances
N = 3 # Number of classes
n = X.shape[1] # Number of features
m
m_test
```
For each class, we will need the $n$ values of $\mu_j$ and $\sigma_j^2$, so let's define $n \times N$ arrays to hold these values, as well as the $N$ prior probabilities that we will need
```
mu_array = np.zeros((n,N))
sigma2_array = np.zeros((n,N))
prior_array = np.zeros((N))
```
Then we can compute these values by looping over the $N$ classes
```
#Learning phase
for k in range(N): #Loop over each class label
    C_k = classes[k]
    prior = sum(y==C_k)/float(y.shape[0]) # Proportion of the training data where the label is C_k
    mu = np.sum(X[y==C_k],axis=0)/len(X[y==C_k]) # Take the mean of those features where the corresponding label is C_k
    sigma2 = np.sum((X[y==C_k] - mu)**2,axis=0)/(len(X[y==C_k])-1) # Take the variance of those features where the corresponding label is C_k
    mu_array[:,k] = mu # Store in the arrays we created above
    sigma2_array[:,k] = sigma2
    prior_array[k] = prior
mu_array
```
And that's it for the training phase. Naive Bayes models, particularly when they compute the parameters of the likelihood model with maximum likelihood estimation, are very efficient to train. We can perform a similar loop over classes to make predictions. First, let's compute the class probabilities for each entry in the training set:
```
# Training set predictions
class_probabilities = np.zeros((m,N)) # The probabilities for each class, for every training instance
for i,x in enumerate(X): # Loop over the training data instances
    for k in range(N): # Loop over the classes
        prior = prior_array[k]
        mu = mu_array[:,k]
        sigma2 = sigma2_array[:,k]
        likelihood = np.prod(np.exp(-(x-mu)**2/(2*sigma2))/np.sqrt(2*np.pi*sigma2)) # Gaussian likelihood, including the normalizing constant
        posterior_k = prior*likelihood
        class_probabilities[i,k] = posterior_k
class_probabilities /= np.sum(class_probabilities,axis=1,keepdims=True) # Normalize so each row sums to 1
print(x, i)
print(class_probabilities[:,:])
```
Now we can classify by finding the class with the highest probability
```
y_pred_train = np.argmax(class_probabilities,axis=1)
y_pred_train
y
```
A nice way to look at classification accuracy is with a confusion matrix. Its entry $(p,q)$ is just the count of data instances whose true class is $p$ (row) and whose predicted class is $q$ (column):
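As a quick sanity check of that convention, a tiny made-up example (not the iris data):
```
from sklearn.metrics import confusion_matrix
# Rows follow the true class, columns the predicted class.
print(confusion_matrix([0, 1, 2, 2], [0, 2, 2, 2]))
# [[1 0 0]
#  [0 0 1]
#  [0 0 2]]
```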
```
cm_train = confusion_matrix(y,y_pred_train)
print(cm_train)
print("training accuracy:", np.mean(y == y_pred_train))
```
We can do the same things for the test set:
```
# Test set predictions
class_probabilities = np.zeros((m_test,N))
for i,x in enumerate(X_test):
    for k in range(N):
        prior = prior_array[k]
        mu = mu_array[:,k]
        sigma2 = sigma2_array[:,k]
        likelihood = np.prod(np.exp(-(x-mu)**2/(2*sigma2))/np.sqrt(2*np.pi*sigma2)) # Gaussian likelihood, including the normalizing constant
        posterior_k = prior*likelihood
        class_probabilities[i,k] = posterior_k
class_probabilities /= class_probabilities.sum(axis=1,keepdims=True)
y_pred_test = np.argmax(class_probabilities,axis=1)
cm_test = confusion_matrix(y_test,y_pred_test)
print(cm_test)
print("test accuracy:", np.mean(y_test == y_pred_test))
```
Finally, let's plot the data as we did above, but this time highlight the misclassified instances
```
# 2D plots of all possible combinations of 2 features out of 4 produces (4 choose 2) = 6 plots
fig,axs = plt.subplots(nrows=4,ncols=4)
for i in range(4):
    for j in range(4):
        if i>j:
            axs[i,j].scatter(X[:,i],X[:,j],c=y)
            axs[i,j].plot(X[y!=y_pred_train,i],X[y!=y_pred_train,j],'ro')
            axs[i,j].set_xlabel(iris['feature_names'][i])
            axs[i,j].set_ylabel(iris['feature_names'][j])
        else: # delete redundant plots
            fig.delaxes(axs[i,j])
plt.show()
```
Not so surprisingly, the misclassifications fall near the boundaries between classes. A nice property of naive Bayes is that it scales gracefully as the dimensionality of the features increases: each feature's class-conditional distribution is estimated independently, and overlaps between classes in feature space tend to become less common in higher dimensions.
# Training Neural Networks
The network we built in the previous part isn't so smart; it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function, given enough data and compute time.
<img src="assets/function_approx.png" width=500px>
At first the network is naive; it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.
To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems
$$
\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}
$$
where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.
By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base.
<img src='assets/gradient_descent.png' width=350px>
## Backpropagation
For single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.
Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.
<img src='assets/backprop_diagram.png' width=550px>
In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.
To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.
$$
\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}
$$
**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.
We update our weights using this gradient with some learning rate $\alpha$.
$$
\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}
$$
The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum.
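As a minimal sketch of this update rule (a single made-up scalar weight and one data point, not the network we're about to build):
```
import torch
w = torch.tensor(2.0, requires_grad=True)     # one hypothetical weight
x, y = torch.tensor(3.0), torch.tensor(9.0)   # one hypothetical training example
for step in range(3):
    loss = 0.5 * (y - w * x) ** 2             # squared-error loss for this example
    loss.backward()                           # d(loss)/dw via autograd
    with torch.no_grad():
        w -= 0.01 * w.grad                    # gradient descent step with alpha = 0.01
        w.grad.zero_()                        # clear the gradient before the next step
    print(step, loss.item())                  # the loss shrinks step by step
```
This is exactly the bookkeeping that PyTorch's optimizers will automate for us later in this notebook.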
## Losses in PyTorch
Let's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.
Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss),
> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.
>
> The input is expected to contain scores for each class.
This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one, but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities; typically we use log-probabilities instead.
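A small illustration of that numerical issue, with made-up logits (independent of the network below):
```
import torch
import torch.nn.functional as F
logits = torch.tensor([[20.0, 0.0]])          # hypothetical scores for two classes
print(F.softmax(logits, dim=1))               # probabilities saturate to roughly [1, 0]
print(F.log_softmax(logits, dim=1))           # log-probabilities stay informative, roughly [0, -20]
```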
```
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,)),  # MNIST images have a single channel
                              ])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
```
### Note
If you haven't seen `nn.Sequential` yet, please finish the end of the Part 2 notebook.
```
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10))
# Define the loss
criterion = nn.CrossEntropyLoss()
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
```
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LogSoftmax)). Then you can get the actual probabilities by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss)).
>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. Note that for `nn.LogSoftmax` and `F.log_softmax` you'll need to set the `dim` keyword argument appropriately. `dim=0` calculates softmax across the rows, so each column sums to 1, while `dim=1` calculates across the columns so each row sums to 1. Think about what you want the output to be and choose `dim` appropriately.
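If the `dim` behaviour is hard to picture, this quick check on a random 2x3 tensor shows which axis gets normalized:
```
import torch
import torch.nn.functional as F
t = torch.randn(2, 3)
print(F.softmax(t, dim=0).sum(dim=0))   # columns sum to 1
print(F.softmax(t, dim=1).sum(dim=1))   # rows sum to 1
```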
```
# TODO: Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Define the loss
criterion = nn.NLLLoss()
### Run this to check your work
# Get our data
images, labels = next(iter(trainloader))
# Flatten images
images = images.view(images.shape[0], -1)
# Forward pass, get our logits
logits = model(images)
# Calculate the loss with the logits and the labels
loss = criterion(logits, labels)
print(loss)
```
## Autograd
Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.
You can turn off gradients for a block of code with the `torch.no_grad()` context:
```python
x = torch.zeros(1, requires_grad=True)
>>> with torch.no_grad():
... y = x * 2
>>> y.requires_grad
False
```
Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.
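For example, a small sketch (with a throwaway tensor) of using it as a context manager to toggle gradient tracking:
```
import torch
is_train = False
x = torch.randn(3, requires_grad=True)
with torch.set_grad_enabled(is_train):    # acts as a switch for the enclosed block
    y = (x * 2).sum()
print(y.requires_grad)                    # False here; True if is_train were True
```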
The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
```
x = torch.randn(2,2, requires_grad=True)
print(x)
y = x**2
print(y)
```
Below we can see the operation that created `y`, a power operation `PowBackward0`.
```
## grad_fn shows the function that generated this variable
print(y.grad_fn)
```
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
```
z = y.mean()
print(z)
```
You can check the gradients for `x` and `y` but they are empty currently.
```
print(x.grad)
```
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`
$$
\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}
$$
```
z.backward()
print(x.grad)
print(x/2)
```
These gradient calculations are particularly useful for neural networks. For training we need the gradients of the weights with respect to the cost. With PyTorch, we run data forward through the network to calculate the loss, then go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step.
## Loss and Autograd together
When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
```
# Build a feed-forward network
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
images = images.view(images.shape[0], -1)
logits = model(images)
loss = criterion(logits, labels)
print('Before backward pass: \n', model[0].weight.grad)
loss.backward()
print('After backward pass: \n', model[0].weight.grad)
```
## Training the network!
There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
```
from torch import optim
# Optimizers require the parameters to optimize and a learning rate
optimizer = optim.SGD(model.parameters(), lr=0.01)
```
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:
* Make a forward pass through the network
* Use the network output to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights
Below I'll go through one training step and print out the weights and gradients so you can see how they change. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
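A tiny sketch of that accumulation on a single made-up scalar parameter:
```
import torch
w = torch.tensor(1.0, requires_grad=True)
(w * 3).backward()
print(w.grad)        # tensor(3.)
(w * 3).backward()   # no zero_grad() in between...
print(w.grad)        # tensor(6.) -- the gradients added up
w.grad.zero_()
print(w.grad)        # tensor(0.)
```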
```
print('Initial weights - ', model[0].weight)
images, labels = next(iter(trainloader))
images.resize_(64, 784)
# Clear the gradients, do this because gradients are accumulated
optimizer.zero_grad()
# Forward pass, then backward pass, then update weights
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
print('Gradient -', model[0].weight.grad)
# Take an update step and view the new weights
optimizer.step()
print('Updated weights - ', model[0].weight)
```
### Training for real
Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll do a training pass where we calculate the loss, do a backwards pass, and update the weights.
>**Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
```
## Your solution here
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)
epochs = 8
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Flatten MNIST images into a 784 long vector
        images = images.view(images.shape[0], -1)
        optimizer.zero_grad()
        output = model.forward(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")
```
With the network trained, we can check out its predictions.
```
%matplotlib inline
import helper
images, labels = next(iter(trainloader))
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logits = model.forward(img)
# Output of the network are logits, need to take softmax for probabilities
ps = F.softmax(logits, dim=1)
helper.view_classify(img.view(1, 28, 28), ps)
```
Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.
[Sascha Spors](https://orcid.org/0000-0001-7225-9992),
Professorship Signal Theory and Digital Signal Processing,
[Institute of Communications Engineering (INT)](https://www.int.uni-rostock.de/),
Faculty of Computer Science and Electrical Engineering (IEF),
[University of Rostock, Germany](https://www.uni-rostock.de/en/)
# Tutorial Signals and Systems (Signal- und Systemtheorie)
Summer Semester 2021 (Bachelor Course #24015)
- lecture: https://github.com/spatialaudio/signals-and-systems-lecture
- tutorial: https://github.com/spatialaudio/signals-and-systems-exercises
WIP...
The project is currently under heavy development while adding new material for the summer semester 2021
Feel free to contact lecturer [frank.schultz@uni-rostock.de](https://orcid.org/0000-0002-3010-0294)
## Fourier Series Placeholder
```
import numpy as np
import matplotlib.pyplot as plt
def coeffRealA(k,A,T,T_h):
    if k == 0:
        return A*T_h/T
    else:
        return 2*A/(k*np.pi)*np.sin(k*np.pi*T_h/T)
def coeffRealB(k,A,T,T_h):
    if k == 0:
        return 0
    else:
        return 2*A/(np.pi*k)*(1-np.cos(k*np.pi*T_h/T))
def coeffRealC(k,A,T,T_h):
    if k == 0:
        return A*T_h/(2*T)
    else:
        return 2*A*T/(T_h*k**2*np.pi**2)*(1-np.cos(k*np.pi*T_h/T))
plt.close()
index = 0
A = 1
T = 4
T_h = 2
kmax = 5
t = np.arange(-2*T,2*T,1/10000)
x = np.zeros(t.size)
for k in range(0,kmax+1):
    x += coeffRealA(k=k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*t)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
tt=np.arange(-3/4*T,3/4*T,1/10000)
plt.xlabel('$t$')
plt.ylabel('$a_k\\cdot\\cos(k\\omega t)$ and $\\frac{a_0}{2}$')
for k in range(0,kmax+1):
    plt.plot(tt,coeffRealA(k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*tt),label='$k=$%d' %(k))
plt.grid(True)
plt.ylim(-A,A)
plt.xticks(np.arange(-3/4*T,3/4*T,T/4))
plt.xlim(tt[0],tt[tt.size-1])
s = 'Fourier series components of task a)'
plt.title(s)
plt.legend(loc=1)
plt.subplot(1,2,2)
plt.xlabel('$t$')
plt.ylabel('$x_a(t)$')
plt.plot(t,x,'b',label='$T=%d$\n$T_h=%.1f$\n$0 \leq k \leq %d$' %(T,T_h,kmax))
plt.ylim(np.amin(x)-np.abs(np.amax(x)-np.amin(x))/5,np.amax(x)+np.abs(np.amax(x)-np.amin(x))/5)
plt.grid(True)
plt.legend(loc='lower center')
plt.xticks(np.arange(-2*T,2*T,T/2))
plt.xlim(t[0],t[t.size-1])
s = 'real fourier series task a)'
plt.title(s)
plt.savefig('Fourierseries_'+str(index)+'.pdf') # the file name is needed
plt.close()
k = np.arange(start=0,stop=kmax+1,step=1)
plt.xlabel('$t$')
plt.ylabel('$a_k$')
s = 'real fourier series coefficents of task a)'
plt.title(s)
ak = np.zeros(kmax+1)
for i in range(0,kmax+1):
    ak[i]=coeffRealA(k=i,A=A,T=T,T_h=T_h)
plt.ylim(np.amin(np.real(ak))-np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5,np.amax(np.real(ak))+np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5)
plt.grid(True)
plt.xticks(np.arange(-1,kmax+2,1))
plt.xlim(-1,kmax+1)
plt.stem(k,ak,label='$A=%d$\n$T=%d$\n$T_h=%.2f$'%(A,T,T_h))
plt.legend()
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
plt.close()
index = 1
A = 1
T = 4
T_h = 2
kmax = 5
t = np.arange(-2*T,2*T,1/10000)
x = np.zeros(t.size)
for k in range(0,kmax+1):
    x += coeffRealB(k=k,A=A,T=T,T_h=T_h)*np.sin(k*2*np.pi/T*t)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
tt=np.arange(-3/4*T,3/4*T,1/10000)
plt.xlabel('$t$')
plt.ylabel('$b_k\\cdot\\sin(k\\omega t)$')
for k in range(0,kmax+1):
    plt.plot(tt,coeffRealB(k,A=A,T=T,T_h=T_h)*np.sin(k*2*np.pi/T*tt),label='$k=$%d' %(k))
plt.grid(True)
plt.ylim(-A,A)
plt.xticks(np.arange(-3/4*T,3/4*T,T/4))
plt.xlim(tt[0],tt[tt.size-1])
s = 'Fourier series components of task b)'
plt.title(s)
plt.legend(loc=1)
plt.subplot(1,2,2)
plt.xlabel('$t$')
plt.ylabel('$x_b(t)$')
plt.plot(t,x,'b',label='$T=%d$\n$T_h=%.1f$\n$0 \leq k \leq %d$' %(T,T_h,kmax))
plt.ylim(np.amin(x)-np.abs(np.amax(x)-np.amin(x))/5,np.amax(x)+np.abs(np.amax(x)-np.amin(x))/5)
plt.grid(True)
plt.legend(loc='lower center')
plt.xticks(np.arange(-2*T,2*T,T/2))
plt.xlim(t[0],t[t.size-1])
s = 'real fourier series task b)'
plt.title(s)
plt.savefig('Fourierseries_'+str(index)+'.pdf') # the file name is needed
plt.close()
k = np.arange(start=0,stop=kmax+1,step=1)
plt.xlabel('$t$')
plt.ylabel('$b_k$')
s = 'real fourier series coefficents of task b)'
plt.title(s)
ak = np.zeros(kmax+1)
for i in range(0,kmax+1):
    ak[i]=coeffRealB(k=i,A=A,T=T,T_h=T_h)
plt.ylim(-1/5*np.amax(ak),6/5*np.amax(ak))
plt.grid(True)
plt.xticks(np.arange(-1,kmax+2,1))
plt.xlim(-1,kmax+1)
plt.stem(k,ak,label='$A=%d$\n$T=%d$\n$T_h=%.2f$'%(A,T,T_h))
plt.legend()
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
plt.close()
index = 2
A = 1
T = 4
T_h = 2
kmax = 5
t = np.arange(-2*T,2*T,1/10000)
x = np.zeros(t.size)
for k in range(0,kmax+1):
    x += coeffRealC(k=k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*t)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
tt=np.arange(-3/4*T,3/4*T,1/10000)
plt.xlabel('$t$')
plt.ylabel('$a_k\\cdot\\cos(k\\omega t)$ and $\\frac{a_0}{2}$')
for k in range(0,kmax+1):
    plt.plot(tt,coeffRealC(k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*tt),label='$k=$%d' %(k))
plt.grid(True)
plt.ylim(-A,A)
plt.xticks(np.arange(-3/4*T,3/4*T,T/4))
plt.xlim(tt[0],tt[tt.size-1])
s = 'Fourier series components of task c)'
plt.title(s)
plt.legend(loc=1)
plt.subplot(1,2,2)
plt.xlabel('$t$')
plt.ylabel('$x_c(t)$')
plt.plot(t,x,'b',label='$T=%d$\n$T_h=%.1f$\n$0 \leq k \leq %d$' %(T,T_h,kmax))
plt.ylim(np.amin(x)-np.abs(np.amax(x)-np.amin(x))/5,np.amax(x)+np.abs(np.amax(x)-np.amin(x))/5)
plt.grid(True)
plt.legend(loc='lower center')
plt.xticks(np.arange(-2*T,2*T,T/2))
plt.xlim(t[0],t[t.size-1])
s = 'real fourier series task c)'
plt.title(s)
plt.savefig('Fourierseries_'+str(index)+'.pdf') # the file name is needed
plt.close()
k = np.arange(start=0,stop=kmax+1,step=1)
plt.xlabel('$t$')
plt.ylabel('$a_k$')
s = 'real fourier series coefficents of task c)'
plt.title(s)
ak = np.zeros(kmax+1)
for i in range(0,kmax+1):
    ak[i]=coeffRealC(k=i,A=A,T=T,T_h=T_h)
plt.ylim(np.amin(np.real(ak))-np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5,np.amax(np.real(ak))+np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5)
plt.grid(True)
plt.xticks(np.arange(-1,kmax+2,1))
plt.xlim(-1,kmax+1)
plt.stem(k,ak,label='$A=%d$\n$T=%d$\n$T_h=%.2f$'%(A,T,T_h))
plt.legend()
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
def plot_complex_fourierseries(A,T,T_h,coeff,index,kmin,kmax,task,omega):
    plt.close()
    t = np.arange(-2*T,2*T,1/(10000))
    x = np.zeros(t.size,dtype=complex)
    for k in range(kmin,kmax+1):
        x += coeff(k=k,A=A,T=T,T_h=T_h)*np.exp(1j*omega*k*t)
    plt.ylabel('$\Re \{x_{%s}(t)\}$'%(task))
    plt.xlabel('$t$')
    s = 'fourier series of task '+task+') with complex coefficents'
    plt.title(s)
    plt.grid(True)
    plt.xticks(np.arange(start=-3*T,stop=3*T,step=1/2*T))
    plt.xlim(-2*T,2*T)
    if np.abs(np.amax(np.real(x))-np.amin(np.real(x)))<10/7:
        plt.ylim(np.amin(np.real(x))+np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/2-1,np.amin(np.real(x))+np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/2+1)
    else:
        plt.ylim(np.amin(np.real(x))-np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/5,np.amax(np.real(x))+np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/5)
    plt.plot(t,np.real(x),label='$T=%d$\n$T_h=%d$\n$%d\leq k \leq %d$'%(2*np.pi/omega,2*np.pi/(omega*T)*T_h,kmin,kmax))
    plt.legend()
    plt.savefig('Fourierseries_'+str(index)+'.pdf')
    k = np.arange(start=kmin,stop=kmax+1,step=1)
    ckm = np.zeros(k.size,dtype=complex)
    ckp = np.zeros(k.size,dtype=complex)
    i = kmin
    while i <= kmax:
        ckm[i+kmax]=coeff(k=i,A=A,T=T,T_h=T_h)
        i += 1
    ckp = np.angle(ckm+0)
    ckm = np.abs(ckm)
    plt.figure(figsize=(10,4))
    plt.subplot(1,2,1)
    plt.xlabel('$k$')
    plt.ylabel('$|c_k|$')
    s = 'fourier coefficent magnitude of task '+task+')'
    plt.title(s)
    plt.grid(True)
    plt.xticks(np.arange(kmin-1,kmax+2,1))
    plt.xlim(kmin-1,kmax+1)
    plt.ylim(-1/5*np.amax(ckm),6/5*np.amax(ckm))
    plt.stem(k,ckm)
    plt.subplot(1,2,2)
    plt.ylabel('$\\varphi(k)$')
    plt.xlabel('k')
    s = 'fourier coefficent phase of task '+task+')'
    plt.title(s)
    plt.grid(True)
    plt.xticks(np.arange(kmin-1,kmax+2,1))
    plt.xlim(kmin-1,kmax+1)
    plt.ylim(-6/5*np.pi,6/5*np.pi)
    plt.stem(k,ckp)
    plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
    plt.close()
def coeffAComplex(k,A,T,T_h):
    if k == 0:
        return A*T_h/T
    else:
        return A/(np.pi*k)*np.sin(k*np.pi*T_h/T)
def coeffBComplex(k,A,T,T_h):
    if k == 0:
        return 0
    else:
        return A/(1j*k*np.pi)*(1-np.cos(k*np.pi*T_h/T))
def coeffCComplex(k,A,T,T_h):
    if k == 0:
        return A*T_h/(2*T)
    else:
        return A*T/(T_h*k**2*np.pi**2)*(1-np.cos(k*np.pi*T_h/T))
def coeffDComplex(k,A,T,T_h):
    return 1*coeffAComplex(k=k,A=A,T=T,T_h=T_h)+1j*coeffBComplex(k=k,A=A,T=T,T_h=T_h)
def coeffEComplex(k,A,T,T_h):
    return np.exp(-1j*k*2*np.pi/T*T_h/2)*coeffAComplex(k=k,A=A,T=T,T_h=T_h)
def coeffFComplex(k,A,T,T_h):
    return coeffAComplex(k=k-3,A=A,T=T,T_h=T_h)
def coeffGComplex(k,A,T,T_h):
    return coeffBComplex(k=-k,A=A,T=T,T_h=T_h)
def coeffHComplex(k,A,T,T_h):
    return T*coeffAComplex(k=k,A=A,T=T,T_h=T_h)*coeffBComplex(k=k,A=A,T=T,T_h=T_h)
def coeffIComplex(k,A,T,T_h):
    return 1j*k*2*np.pi/T*coeffCComplex(k=k,A=A,T=T,T_h=T_h)
def coeffJComplex(k,A,T,T_h):
    return coeffAComplex(k=k,A=A,T=T,T_h=T_h)
def coeffKComplex(k,A,T,T_h):
    if k == 0:
        return 1
    else:
        return 0
def coeffLComplex(k,A,T,T_h):
    if k == 0:
        return 0
    else:
        return A**2/(1j*k*np.pi)+1j*A**2*T/(T_h*k**2*np.pi**2)*np.sin(k*np.pi+T_h/T)
def coeffMComplex(k,A,T,T_h):
    if k == 0:
        return 0
    else:
        return T/(2j*k*np.pi)*coeffBComplex(k=k,A=A,T=T,T_h=T_h)
coeff = [coeffAComplex,coeffBComplex,coeffCComplex,coeffDComplex,coeffEComplex,coeffFComplex,coeffGComplex,coeffHComplex,coeffIComplex,coeffJComplex,coeffKComplex,coeffLComplex,coeffMComplex]
index = 3
character = 97
for i in coeff:
    if chr(character)=='j':
        plot_complex_fourierseries(A=1,T=4,T_h=2,kmin=-5,kmax=5,coeff=i,index=index,task=chr(character),omega=np.pi/4)
    else:
        plot_complex_fourierseries(A=1,T=4,T_h=2,kmin=-5,kmax=5,coeff=i,index=index,task=chr(character),omega=np.pi/2)
    index += 1
    character += 1
```
## Copyright
This tutorial is provided as Open Educational Resource (OER), to be found at
https://github.com/spatialaudio/signals-and-systems-exercises
accompanying the OER lecture
https://github.com/spatialaudio/signals-and-systems-lecture.
Both are licensed under a) the Creative Commons Attribution 4.0 International
License for text and graphics and b) the MIT License for source code.
Please attribute material from the tutorial as *Frank Schultz,
Continuous- and Discrete-Time Signals and Systems - A Tutorial Featuring
Computational Examples, University of Rostock* with
``main file, github URL, commit number and/or version tag, year``.
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
def coeffRealA(k,A,T,T_h):
if k == 0:
return A*T_h/T
else:
return 2*A/(k*np.pi)*np.sin(k*np.pi*T_h/T)
def coeffRealB(k,A,T,T_h):
if k == 0:
return 0
else:
return 2*A/(np.pi*k)*(1-np.cos(k*np.pi*T_h/T))
def coeffRealC(k,A,T,T_h):
if k == 0:
return A*T_h/(2*T)
else:
return 2*A*T/(T_h*k**2*np.pi**2)*(1-np.cos(k*np.pi*T_h/T))
plt.close()
index = 0
A = 1
T = 4
T_h = 2
kmax = 5
t = np.arange(-2*T,2*T,1/10000)
x = np.zeros(t.size)
for k in range(0,kmax+1):
x += coeffRealA(k=k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*t)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
tt=np.arange(-3/4*T,3/4*T,1/10000)
plt.xlabel('$t$')
plt.ylabel('$a_k\\cdot\\cos(k\\omega t)$ and $\\frac{a_0}{2}$')
for k in range(0,kmax+1):
plt.plot(tt,coeffRealA(k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*tt),label='$k=$%d' %(k))
plt.grid(True)
plt.ylim(-A,A)
plt.xticks(np.arange(-3/4*T,3/4*T,T/4))
plt.xlim(tt[0],tt[tt.size-1])
s = 'Fourier series components of task a)'
plt.title(s)
plt.legend(loc=1)
plt.subplot(1,2,2)
plt.xlabel('$t$')
plt.ylabel('$x_a(t)$')
plt.plot(t,x,'b',label='$T=%d$\n$T_h=%.1f$\n$0 \leq k \leq %d$' %(T,T_h,kmax))
plt.ylim(np.amin(x)-np.abs(np.amax(x)-np.amin(x))/5,np.amax(x)+np.abs(np.amax(x)-np.amin(x))/5)
plt.grid(True)
plt.legend(loc='lower center')
plt.xticks(np.arange(-2*T,2*T,T/2))
plt.xlim(t[0],t[t.size-1])
s = 'real fourier series task a)'
plt.title(s)
plt.savefig('Fourierseries_'+str(index)+'.pdf') # Name wird benötigt
plt.close()
k = np.arange(start=0,stop=kmax+1,step=1)
plt.xlabel('$t$')
plt.ylabel('$a_k$')
s = 'real fourier series coefficents of task a)'
plt.title(s)
ak = np.zeros(kmax+1)
for i in range(0,kmax+1):
ak[i]=coeffRealA(k=i,A=A,T=T,T_h=T_h)
plt.ylim(np.amin(np.real(ak))-np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5,np.amax(np.real(ak))+np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5)
plt.grid(True)
plt.xticks(np.arange(-1,kmax+2,1))
plt.xlim(-1,kmax+1)
plt.stem(k,ak,label='$A=%d$\n$T=%d$\n$T_h=%.2f$'%(A,T,T_h))
plt.legend()
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
plt.close()
index = 1
A = 1
T = 4
T_h = 2
kmax = 5
t = np.arange(-2*T,2*T,1/10000)
x = np.zeros(t.size)
for k in range(0,kmax+1):
x += coeffRealB(k=k,A=A,T=T,T_h=T_h)*np.sin(k*2*np.pi/T*t)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
tt=np.arange(-3/4*T,3/4*T,1/10000)
plt.xlabel('$t$')
plt.ylabel('$b_k\\cdot\\sin(k\\omega t)$')
for k in range(0,kmax+1):
plt.plot(tt,coeffRealA(k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*tt),label='$k=$%d' %(k))
plt.grid(True)
plt.ylim(-A,A)
plt.xticks(np.arange(-3/4*T,3/4*T,T/4))
plt.xlim(tt[0],tt[tt.size-1])
s = 'Fourier series components of task b)'
plt.title(s)
plt.legend(loc=1)
plt.subplot(1,2,2)
plt.xlabel('$t$')
plt.ylabel('$x_b(t)$')
plt.plot(t,x,'b',label='$T=%d$\n$T_h=%.1f$\n$0 \leq k \leq %d$' %(T,T_h,kmax))
plt.ylim(np.amin(x)-np.abs(np.amax(x)-np.amin(x))/5,np.amax(x)+np.abs(np.amax(x)-np.amin(x))/5)
plt.grid(True)
plt.legend(loc='lower center')
plt.xticks(np.arange(-2*T,2*T,T/2))
plt.xlim(t[0],t[t.size-1])
s = 'real fourier series task b)'
plt.title(s)
plt.savefig('Fourierseries_'+str(index)+'.pdf') # Name wird benötigt
plt.close()
k = np.arange(start=0,stop=kmax+1,step=1)
plt.xlabel('$t$')
plt.ylabel('$b_k$')
s = 'real fourier series coefficents of task b)'
plt.title(s)
ak = np.zeros(kmax+1)
for i in range(0,kmax+1):
ak[i]=coeffRealB(k=i,A=A,T=T,T_h=T_h)
plt.ylim(-1/5*np.amax(ak),6/5*np.amax(ak))
plt.grid(True)
plt.xticks(np.arange(-1,kmax+2,1))
plt.xlim(-1,kmax+1)
plt.stem(k,ak,label='$A=%d$\n$T=%d$\n$T_h=%.2f$'%(A,T,T_h))
plt.legend()
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
plt.close()
index = 2
A = 1
T = 4
T_h = 2
kmax = 5
t = np.arange(-2*T,2*T,1/10000)
x = np.zeros(t.size)
for k in range(0,kmax+1):
x += coeffRealC(k=k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*t)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
tt=np.arange(-3/4*T,3/4*T,1/10000)
plt.xlabel('$t$')
plt.ylabel('$a_k\\cdot\\cos(k\\omega t)$ and $\\frac{a_0}{2}$')
for k in range(0,kmax+1):
plt.plot(tt,coeffRealA(k,A=A,T=T,T_h=T_h)*np.cos(k*2*np.pi/T*tt),label='$k=$%d' %(k))
plt.grid(True)
plt.ylim(-A,A)
plt.xticks(np.arange(-3/4*T,3/4*T,T/4))
plt.xlim(tt[0],tt[tt.size-1])
s = 'Fourier series components of task c)'
plt.title(s)
plt.legend(loc=1)
plt.subplot(1,2,2)
plt.xlabel('$t$')
plt.ylabel('$x_c(t)$')
plt.plot(t,x,'b',label='$T=%d$\n$T_h=%.1f$\n$0 \leq k \leq %d$' %(T,T_h,kmax))
plt.ylim(np.amin(x)-np.abs(np.amax(x)-np.amin(x))/5,np.amax(x)+np.abs(np.amax(x)-np.amin(x))/5)
plt.grid(True)
plt.legend(loc='lower center')
plt.xticks(np.arange(-2*T,2*T,T/2))
plt.xlim(t[0],t[t.size-1])
s = 'real fourier series task c)'
plt.title(s)
plt.savefig('Fourierseries_'+str(index)+'.pdf') # Name wird benötigt
plt.close()
k = np.arange(start=0,stop=kmax+1,step=1)
plt.xlabel('$t$')
plt.ylabel('$a_k$')
s = 'real fourier series coefficents of task c)'
plt.title(s)
ak = np.zeros(kmax+1)
for i in range(0,kmax+1):
ak[i]=coeffRealA(k=i,A=A,T=T,T_h=T_h)
plt.ylim(np.amin(np.real(ak))-np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5,np.amax(np.real(ak))+np.abs(np.amax(np.real(ak))-np.amin(np.real(ak)))/5)
plt.grid(True)
plt.xticks(np.arange(-1,kmax+2,1))
plt.xlim(-1,kmax+1)
plt.stem(k,ak,label='$A=%d$\n$T=%d$\n$T_h=%.2f$'%(A,T,T_h))
plt.legend()
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
def plot_complex_fourierseries(A,T,T_h,coeff,index,kmin,kmax,task,omega):
plt.close()
t = np.arange(-2*T,2*T,1/(10000))
x = np.zeros(t.size,dtype=complex)
for k in range(kmin,kmax+1):
x += coeff(k=k,A=A,T=T,T_h=T_h)*np.exp(1j*omega*k*t)
plt.ylabel('$\Re \{x_{%s}(t)\}$'%(task))
plt.xlabel('$t$')
s = 'fourier series of task '+task+') with complex coefficents'
plt.title(s)
plt.grid(True)
plt.xticks(np.arange(start=-3*T,stop=3*T,step=1/2*T))
plt.xlim(-2*T,2*T)
if np.abs(np.amax(np.real(x))-np.amin(np.real(x)))<10/7:
plt.ylim(np.amin(np.real(x))+np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/2-1,np.amin(np.real(x))+np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/2+1)
else:
plt.ylim(np.amin(np.real(x))-np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/5,np.amax(np.real(x))+np.abs(np.amax(np.real(x))-np.amin(np.real(x)))/5)
plt.plot(t,np.real(x),label='$T=%d$\n$T_h=%d$\n$%d\leq k \leq %d$'%(2*np.pi/omega,2*np.pi/(omega*T)*T_h,kmin,kmax))
plt.legend()
plt.savefig('Fourierseries_'+str(index)+'.pdf')
k = np.arange(start=kmin,stop=kmax+1,step=1)
ckm = np.zeros(k.size,dtype=complex)
ckp = np.zeros(k.size,dtype=complex)
i = kmin
while i <= kmax:
ckm[i+kmax]=coeff(k=i,A=A,T=T,T_h=T_h)
i += 1
ckp = np.angle(ckm+0)
ckm = np.abs(ckm)
plt.figure(figsize=(10,4))
plt.subplot(1,2,1)
plt.xlabel('$k$')
plt.ylabel('$|c_k|$')
s = 'fourier coefficent magnitude of task '+task+')'
plt.title(s)
plt.grid(True)
plt.xticks(np.arange(kmin-1,kmax+2,1))
plt.xlim(kmin-1,kmax+1)
plt.ylim(-1/5*np.amax(ckm),6/5*np.amax(ckm))
plt.stem(k,ckm)
plt.subplot(1,2,2)
plt.ylabel('$\\varphi(k)$')
plt.xlabel('k')
s = 'Fourier coefficient phase of task '+task+')'
plt.title(s)
plt.grid(True)
plt.xticks(np.arange(kmin-1,kmax+2,1))
plt.xlim(kmin-1,kmax+1)
plt.ylim(-6/5*np.pi,6/5*np.pi)
plt.stem(k,ckp)
plt.savefig('Fourierseriescoeff_'+str(index)+'.pdf')
plt.close()
def coeffAComplex(k,A,T,T_h):
if k == 0:
return A*T_h/T
else:
return A/(np.pi*k)*np.sin(k*np.pi*T_h/T)
def coeffBComplex(k,A,T,T_h):
if k == 0:
return 0
else:
return A/(1j*k*np.pi)*(1-np.cos(k*np.pi*T_h/T))
def coeffCComplex(k,A,T,T_h):
if k == 0:
return A*T_h/(2*T)
else:
return A*T/(T_h*k**2*np.pi**2)*(1-np.cos(k*np.pi*T_h/T))
def coeffDComplex(k,A,T,T_h):
return 1*coeffAComplex(k=k,A=A,T=T,T_h=T_h)+1j*coeffBComplex(k=k,A=A,T=T,T_h=T_h)
def coeffEComplex(k,A,T,T_h):
return np.exp(-1j*k*2*np.pi/T*T_h/2)*coeffAComplex(k=k,A=A,T=T,T_h=T_h)
def coeffFComplex(k,A,T,T_h):
return coeffAComplex(k=k-3,A=A,T=T,T_h=T_h)
def coeffGComplex(k,A,T,T_h):
return coeffBComplex(k=-k,A=A,T=T,T_h=T_h)
def coeffHComplex(k,A,T,T_h):
return T*coeffAComplex(k=k,A=A,T=T,T_h=T_h)*coeffBComplex(k=k,A=A,T=T,T_h=T_h)
def coeffIComplex(k,A,T,T_h):
return 1j*k*2*np.pi/T*coeffCComplex(k=k,A=A,T=T,T_h=T_h)
def coeffJComplex(k,A,T,T_h):
return coeffAComplex(k=k,A=A,T=T,T_h=T_h)
def coeffKComplex(k,A,T,T_h):
if k == 0:
return 1
else:
return 0
def coeffLComplex(k,A,T,T_h):
if k == 0:
return 0
else:
return A**2/(1j*k*np.pi)+1j*A**2*T/(T_h*k**2*np.pi**2)*np.sin(k*np.pi+T_h/T)
def coeffMComplex(k,A,T,T_h):
if k == 0:
return 0
else:
return T/(2j*k*np.pi)*coeffBComplex(k=k,A=A,T=T,T_h=T_h)
coeff = [coeffAComplex,coeffBComplex,coeffCComplex,coeffDComplex,coeffEComplex,coeffFComplex,coeffGComplex,coeffHComplex,coeffIComplex,coeffJComplex,coeffKComplex,coeffLComplex,coeffMComplex]
index = 3
character = 97
for i in coeff:
if chr(character)=='j':
plot_complex_fourierseries(A=1,T=4,T_h=2,kmin=-5,kmax=5,coeff=i,index=index,task=chr(character),omega=np.pi/4)
else:
plot_complex_fourierseries(A=1,T=4,T_h=2,kmin=-5,kmax=5,coeff=i,index=index,task=chr(character),omega=np.pi/2)
index += 1
character += 1
```
!wget https://anaconda.org/pytorch/faiss-cpu/1.2.1/download/linux-64/faiss-cpu-1.2.1-py36_cuda9.0.176_1.tar.bz2
!tar xvjf faiss-cpu-1.2.1-py36_cuda9.0.176_1.tar.bz2
!cp -r lib/python3.6/site-packages/* /usr/local/lib/python3.6/dist-packages/
!pip install mkl
%tensorflow_version 2.x
!pip install tensorflow-gpu==2.0
import tensorflow as tf
!pip install pyarrow
!apt install libomp-dev
from google.colab import drive
drive.mount('/content/drive')
!pwd; ls
#!cp BioBert.tar.gz "drive/MyDrive/Colab Notebooks/"
#!cp DataAndCheckpoint.zip "drive/MyDrive/Colab Notebooks/"
!pip install "drive/MyDrive/Colab Notebooks/docproduct.zip"
#!pip cache purge
!pip install "drive/MyDrive/Colab Notebooks/gpt2_estimator.zip"
#!rm DataAndCheckpoint.zip
#@title Download all model checkpoints and question/answer data.
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
import os
import requests
import urllib.request
import shutil
if not os.path.exists('BioBert.tar.gz'):
if os.path.exists('drive/MyDrive/Colab Notebooks/BioBert.tar.gz'):
shutil.copy('drive/MyDrive/Colab Notebooks/BioBert.tar.gz','.')
else:
# Download the file from `url` and save it locally under `file_name`:
urllib.request.urlretrieve('https://github.com/naver/biobert-pretrained/releases/download/v1.0-pubmed-pmc/biobert_v1.0_pubmed_pmc.tar.gz', 'BioBert.tar.gz')
!cp BioBert.tar.gz "drive/MyDrive/Colab Notebooks/"
if not os.path.exists('BioBertFolder'):
os.makedirs('BioBertFolder')
import tarfile
tar = tarfile.open("BioBert.tar.gz")
tar.extractall(path='BioBertFolder/')
tar.close()
file_id = '1uCXv6mQkFfpw5txGnVCsl93Db7t5Z2mp'
download_file_from_google_drive(file_id, 'Float16EmbeddingsExpanded5-27-19.pkl')
if not os.path.exists('DataAndCheckpoint.zip'):
if os.path.exists('drive/MyDrive/Colab Notebooks/DataAndCheckpoint.zip'):
shutil.copy('drive/MyDrive/Colab Notebooks/DataAndCheckpoint.zip','.')
else:
file_id = 'https://onedrive.live.com/download?cid=9DEDF3C1E2D7E77F&resid=9DEDF3C1E2D7E77F%2132792&authkey=AEQ8GtkcDbe3K98'
urllib.request.urlretrieve( file_id, 'DataAndCheckpoint.zip')
!cp DataAndCheckpoint.zip "drive/MyDrive/Colab Notebooks/"
if not os.path.exists('newFolder'):
os.makedirs('newFolder')
import zipfile
zip_ref = zipfile.ZipFile('DataAndCheckpoint.zip', 'r')
zip_ref.extractall('newFolder')
zip_ref.close()
#@title Load model weights and Q&A data. Double click to see code
from docproduct.predictor import RetreiveQADoc
pretrained_path = 'BioBertFolder/biobert_v1.0_pubmed_pmc/'
# ffn_weight_file = None
bert_ffn_weight_file = 'newFolder/models/bertffn_crossentropy/bertffn'
embedding_file = 'Float16EmbeddingsExpanded5-27-19.pkl'
doc = RetreiveQADoc(pretrained_path=pretrained_path,
ffn_weight_file=None,
bert_ffn_weight_file=bert_ffn_weight_file,
embedding_file=embedding_file)
while True:
print('Enter Question: ')
question_text = input()
if len(question_text) <= 1:
print('Quitting.. Bye.')
break
search_similarity_by = 'answer' #@param ['answer', "question"]
number_results_toReturn=1 #@param {type:"number"}
answer_only=True #@param ["False", "True"] {type:"raw"}
returned_results = doc.predict( question_text ,
search_by=search_similarity_by, topk=number_results_toReturn, answer_only=answer_only)
print('')
for jk in range(len(returned_results)):
print("Result ", jk+1)
print(returned_results[jk])
print('')
!pip install flask_ngrok
# !pwd; ls; chmod +777 ngrok
# !./ngrok authtoken 23vZ4puLfclclQ5f5wSzfJ4c0eS_65exTsXLh7GaYWDQGuktV
!npm install -g localtunnel
!nohup lt --port 5000 -s aidoc &
!ps aux
from flask_ngrok import run_with_ngrok
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route("/")
def home():
return "<h1>This is medical assistant, please tell your symptoms..</h1>"
@app.route("/medicaladvice", methods=['GET', 'POST'])
def medicaladvice():
question = request.args.get("question")
print('Question: ', question)
if question is None and request.content_type == 'application/json':
question = request.json.get("question")
if question is None:
return 'Empty question!!'
print('Question from json data:', question)
elif not isinstance(question, str):
return 'Could not find question in request'
search_similarity_by = 'answer' #@param ['answer', "question"]
number_results_toReturn = 1
answer_only=True #@param ["False", "True"] {type:"raw"}
returned_results = doc.predict(question ,
search_by=search_similarity_by,
topk=number_results_toReturn,
answer_only=answer_only)
print ("Answer: ", returned_results[0])
return returned_results[0]
app.run()
```
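A minimal client sketch for the endpoint above; it assumes the localtunnel started with `-s aidoc` exposes the app at `https://aidoc.loca.lt` (the hostname is hypothetical and depends on the tunnel you are actually assigned):
```
import requests

# Query the /medicaladvice endpoint with a question passed as a query parameter
resp = requests.get('https://aidoc.loca.lt/medicaladvice',  # hypothetical public tunnel URL
                    params={'question': 'I have had a dry cough and mild fever for three days.'},
                    timeout=120)
print(resp.status_code)
print(resp.text)  # the endpoint returns the top retrieved answer as plain text
```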
## Importing libraries
```
import pandas as pd
import numpy as np
import datetime
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
%matplotlib inline
pd.set_option('display.max_columns', None)
```
Uploading files to Google Colab.
## Importing data
```
data = pd.read_csv('unit4.csv')
data.head()
data.shape
```
## Checking data types
```
data.info()
```
## Checking for null values
```
nulls = pd.DataFrame(data.isna().sum()/len(data))
nulls= nulls.reset_index()
nulls.columns = ['column_name', 'Percentage Null Values']
nulls.sort_values(by='Percentage Null Values', ascending = False)
```
## Checking the numerical values
```
numericals = data.select_dtypes(np.number)
numericals.head()
```
* The INCOME column might be an important factor in predicting the gift value, so even though it has a lot of null values, we will not drop the column.
* In this exercise, we will try a more precise method to replace the null values, instead of simply replacing them with a constant value, the mean or the median.
* We will use a similar method for the column TIMELAG.
## Checking the **income's** histogram
```
sns.histplot(data['INCOME'])
```
# Activity: Dealing with missing values.
Possible approaches:
**Drop:**
Let's consider the **gender** column.
* Can we somehow guess what the missing gender is? **NO**.
* Can this column have any possible value compatible with a missing value? **NO**
Therefore, we are forced to drop the corresponding rows.
```
data['GENDER'].value_counts()
to_drop = data[~data['GENDER'].isin(['F','M'])].index.tolist()
data.drop(to_drop, inplace = True)
data.reset_index(drop=True)
data['GENDER'].value_counts()
```
**Replace:**
We can replace a value if we have some other information that tells us we can do this, even if it is not the missing information itself.
For example, if the data follow an approximately normal distribution, we might want to substitute the missing values with the mean. You always need to have something that "tells you" that you can replace the data.
Let's consider column **HOMEOWNR**.
* Can we guess the value? **NO**
* Can this column have any possible value compatible with a missing value? **YES**: 'unknown'.
Therefore, we can replace the value in this column by **'U'** for 'unknown'.
```
data['HOMEOWNR'].value_counts()
np.unique(data['HOMEOWNR']).tolist()
data['HOMEOWNR'] = np.where(data['HOMEOWNR'] == ' ','U','H')
data.head()
```
## Interpolation
Let's see which kind of interpolation across the missing values of the **'INCOME'** column works best.
First **LOOK AT YOUR DATA!!!**
```
data[['INCOME']].head()
sns.histplot(data['INCOME'])
```
Let's try first with linear interpolation
```
new_income_data_linear = data['INCOME'].interpolate(method='linear')
sns.histplot(new_income_data_linear)
```
Akima's interpolation
```
new_income_data_akima = data['INCOME'].interpolate(method='akima')
sns.histplot(new_income_data_akima)
```
Polynomial order 3.
```
new_income_data_poly = data['INCOME'].interpolate(method='polynomial', order=3)
sns.histplot(new_income_data_poly)
```
Imputing with the mean
```
# Testing interpolation method with mean and median methods
points2 = data['INCOME'].fillna(np.mean(data['INCOME']))
sns.histplot(points2)
```
Does it make sense at all?
# Activity: Using linear regression to impute missing values.
You already know how to predict a numerical amount. Therefore, you can use other columns to predict the missing values of the column of your interest. Use the 'HV1' and 'IC1' columns to predict the missing values of 'INCOME'.
**Hint**: For the sake of simplicity, when you have NaNs, you can work with them as if they were a test set.
```
data.shape
data['INCOME'].isna().sum()
data['INCOME'].value_counts(dropna=False)
data.columns
pd.__version__
np.__version__
from sklearn.linear_model import LinearRegression
X = data[~data.INCOME.isna()][['HV1', 'IC1']]
y = data[~data.INCOME.isna()]['INCOME']
X_nulls = data[data.INCOME.isna()][['HV1', 'IC1']]
#X_nulls2 = data[data['INCOME']][['HV1', 'IC1']]
#X_nulls2 = data.loc['INCOME',['HV1','IC1']]
model = LinearRegression().fit(X,y)
income_pred = model.predict(X_nulls)
#income_pred2 = model.predict(X_nulls2)
pd.DataFrame(np.around(income_pred,0)).isna().sum()
#income_pred.isna().sum()
#pd.DataFrame(np.round(income_pred)).isna().sum()
data.loc[data.INCOME.isnull(), 'INCOME'] = np.around(income_pred,0) # Income values are integers, so we round; .loc is needed so the assignment modifies the original DataFrame
data.head()
```
Let's explore now the column 'TIMELAG'
```
sns.histplot(data['TIMELAG'])
sns.boxplot(x=data['TIMELAG'])
ax = sns.distplot(data['TIMELAG'])
ax2 = ax.twinx()
sns.boxplot(x=data['TIMELAG'], ax=ax2)
ax2.set(ylim=(-.5, 10))
```
Let's try some transformations to see if we can improve the distribution.
```
def log_transfom_clean_(x):
if np.isfinite(x) and x!=0: # If the value is finite and != 0...
return np.log(x)
else:
return np.NAN # We are returning NaNs so that we can replace them later
def sqrt_transfom_clean_(x):
if np.isfinite(x) and x>=0:
return np.sqrt(x)
else:
return np.NAN # We are returning NaNs so that we can replace them later
# Using the functions to check the distribution of transformed data
pd.Series(map(log_transfom_clean_, data['TIMELAG'])).hist()
plt.show()
pd.Series(map(sqrt_transfom_clean_, data['TIMELAG'])).hist()
plt.show()
```
As can be seen in the figure, the logarithmic transformation works better than the square root.
This could be expected given the extreme skewness of the data.
We could also use a Box-Cox transformation, and the resulting distribution would probably be similar (although not the same, and possibly even better). However, this is an illustrative example of how to proceed.
Let's assume that we don't know about Box-Cox and we want to apply the logarithmic transformation to the **'TIMELAG'** column.
```
data['TIMELAG'] = list(map(log_transfom_clean_, data['TIMELAG']))
```
Remember that our function ignored the 0 and infinite values. We may want to replace them with the mean of the NEW distribution.
```
data['TIMELAG'] = data['TIMELAG'].fillna(np.mean(data['TIMELAG']))
sns.distplot(data['TIMELAG'])
plt.show()
```
It's not perfectly Gaussian but we improved it a lot.
# Activity: Logarithmic transformation.
A logarithmic scale is commonly used to visualize exponential data: the logarithm is the inverse of the exponential function, so the result is a linear visualization. This is needed because otherwise we cannot visualize exponential growth properly. As an example, you can see some coronavirus visualizations, like [this one](https://education-team-2020.s3-eu-west-1.amazonaws.com/data-analytics/4.1-COVID-Logarithmicvslinear.png). Check the log transform with the ICn columns.
```
sns.distplot(data['IC1'])
#sns.distplot(np.log(data['IC1']))
data['IC1'].describe()
fig,axes=plt.subplots(1,2)
sns.distplot(data['IC1'], ax=axes[0], axlabel='IC1')
sns.distplot(np.log(data['IC1']+1), ax=axes[1], axlabel='log(IC1+1)')
fig,axes=plt.subplots(1,2)
sns.distplot(data['IC2'], ax=axes[0], axlabel='IC2')
sns.distplot(np.log(data['IC2']+1), ax=axes[1], axlabel='log(IC2+1)')
fig,axes=plt.subplots(1,2)
sns.distplot(data['IC3'], ax=axes[0], axlabel = 'IC3')
sns.distplot(np.log(data['IC3']+1), ax=axes[1], axlabel = 'log(IC3+1)')
fig,axes=plt.subplots(1,2)
sns.distplot(data['IC4'], ax=axes[0], axlabel = 'IC4')
sns.distplot(np.log(data['IC4']+1), ax=axes[1], axlabel = 'log(IC4+1)')
```
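A tiny numerical illustration of why the logarithm linearizes exponential growth (toy data, not from the dataset):
```
# Exponential growth becomes a straight line after taking the log
t = np.arange(10)
exp_growth = 2.0 ** t
print(np.log(exp_growth))  # evenly spaced values, i.e. linear in t
```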
Even after using the transformation, there is still some skewness in the column TIMELAG. We will remove the outliers only from the right side of the distribution plot.
```
sns.distplot(data['TIMELAG'])
```
Let's start by finding out how many values will be removed if we decide to drop all the values beyond the upper whisker.
```
iqr = np.percentile(data['TIMELAG'],75) - np.percentile(data['TIMELAG'],25)
upper_limit = np.percentile(data['TIMELAG'],75) + 1.5*iqr
print("The upper wisker is at: %4.2f" % upper_limit)
outliers = data[data['TIMELAG'] > upper_limit].index.tolist()
print("The number of points outise the upper wisker is: ",len(outliers))
```
## Filtering outliers
Let's explore two different ways to drop outliers.
### Filter function. filter(lambda_function, column)
```
points = list(filter(lambda x: x < upper_limit, data['TIMELAG']))
len(points)
```
### Panda's approach
```
data = data[data['TIMELAG'] < upper_limit]
sns.distplot(data['TIMELAG'])
plt.show()
```
# Activity:
Let's learn how the following functions work:
* Map
* Filter
* Reduce
## Map
This function, applies another given function to every element of a set.
It works **elementwise**.
```
list(map(str,range(15)))
```
## Filter
This function also works elementwise, but it returns only the elements which meet a condition.
```
list(filter(lambda x: x %2 == 0,range(15)))
```
## Reduce
This function performs some computation over a list and returns the output of applying that computation across the whole list. It is **NOT ELEMENTWISE**.
```
from functools import reduce
lst = list(range(6))
print("The list is: ",lst)
print("The result of appliying the reduce over the list is: ",reduce(lambda a,b: a+b,lst))
```
# Lesson 1 Key Concepts
## Selecting categorical data
```
categoricals = data.select_dtypes(object)
categoricals.head()
```
Let's check the number of missing values for the 'PVASTATE' column
```
data['PVASTATE'].value_counts()
```
Now for the column 'RECP3'
```
data['RECP3'].value_counts()
```
And finally for the 'VETERANS' column
```
data['VETERANS'].value_counts()
```
Those columns have too many missing values. If we drop the rows containing those NAs, we risk shrinking our dataset too much. We can't do much with columns that have so many missing values, therefore let's drop them.
```
data = data.drop(columns=['PVASTATE', 'RECP3', 'VETERANS'], axis=1)
```
# Activity:
For the column 'DOMAIN', discuss which option is better to clean the rows where the values are empty.
* Option 1: Filtering the rows with the empty values.
* Option 2: Replacing the empty values with some other category, the most frequently represented value in that column.
```
data['DOMAIN'].value_counts()
unique_values = list(np.unique(data['DOMAIN']))
print(unique_values)
```
This column has a lot of possible different values. It's difficult to see how to impute them, and the number of missing values is quite small compared with the number of non-missing values. Therefore, dropping the missing values will not hurt.
```
data = data[data['DOMAIN'].isin(unique_values[1:])]
# Note after you filter, it is a good practice to reset the index
data = data.reset_index(drop=True)
data.head()
```
Let's check what we have now.
```
data["DOMAIN"].value_counts()
#filter(lambda x: x != " ",data['DOMAIN'])
```
# Lesson 2 Key Concepts
Let's consider the column 'GENDER'
```
data['GENDER'].value_counts()
```
No missing values as we cleaned it this morning ;)
Now let's see if there are differences in 'AVGGIFT' by gender.
```
# Visually analyzing categorical data with Target variable
sns.boxplot(x="GENDER", y="AVGGIFT", data=data)
plt.show()
ax1 = sns.distplot(data['AVGGIFT'][data['GENDER'] == 'M'], color = 'Red')
ax2 = sns.distplot(data['AVGGIFT'][data['GENDER'] == 'F'], color = 'Blue')
plt.xlim(0, 200)
```
Both groups don't look too different. They have a few outliers.
**HOWEVER**, be careful. You don't know how the distributions look inside the boxes!
Let's check the average gift by gender.
```
sns.barplot(x="GENDER", y="AVGGIFT", data=data)
plt.show()
```
We can conclude that the average gift does not differ significantly by gender. Therefore, let's remove this column.
```
data = data.drop(columns=['GENDER'], axis=1)
```
# Activity:
There is a more efficient way to use map over pandas dataframes, and it is called [apply](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.apply.html)
```
#data['GENDER'] = data['GENDER'].apply(lambda x: 'other' if x in ['',' ' ,'U', 'C', 'J', 'A'] else x)
```
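For instance, a small illustrative sketch (the Series here is hypothetical, since GENDER was already dropped above):
```
import pandas as pd

s = pd.Series(['F', 'M', 'U', 'C', 'F'])  # hypothetical example data

# map over a plain iterable returns a list
cleaned_map = list(map(lambda x: 'other' if x not in ['F', 'M'] else x, s))

# apply works directly on the Series and returns a Series
cleaned_apply = s.apply(lambda x: 'other' if x not in ['F', 'M'] else x)

print(cleaned_map)            # ['F', 'M', 'other', 'other', 'F']
print(cleaned_apply.tolist()) # ['F', 'M', 'other', 'other', 'F']
```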
# Lesson 3 Key Concepts.
## Dealing with a large number of categories
Let's inspect the column 'STATE'
```
state_values = list(np.unique(data['STATE']))
state_values
```
Hmmm, there are typos... What is 'AA'? A current list of abbreviations can be found [here](https://www.ssa.gov/international/coc-docs/states.html)
```
real_states = ['AL','AK','AS','AZ','AR','CA','CO','CT','DE','DC','FL','GA','GU','HI','ID','IL','IN','IA','KS',
'KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','MP','OH','OK','OR',
'PA','PR','RI','SC','SD','TN','TX','UT','VT','VA','VI','WA','WV','WI','WY']
```
First we are going to filter out values which don't correspond to any entry in the previous list.
```
data = data[data['STATE'].isin(real_states)]
```
Now, let's check the frequencies of each state.
```
vals = pd.DataFrame(data['STATE'].value_counts())
vals = vals.reset_index()
vals.columns = ['state', 'counts']
vals
```
As we can see, there are states which are underrepresented. We have several options:
* Group the states into smaller groups.
* Group underrepresented states into a single group.
* A combination of both.
We will use the last option.
Given the previous state frequencies, can you guess any business insight?
First, let's get the states which are underrepresented.
```
group_states_df = vals[vals['counts']<2500]
group_states = list(group_states_df['state'])
group_states
def clean_state(x):
if x in group_states:
return 'other'
else:
return x
data['STATE'] = list(map(clean_state, data['STATE']))
```
What are now our final groups?
```
new_state_values = list(np.unique(data['STATE']))
new_state_values
```
## Binning numerical columns.
Let's look now at the 'IC2' column. This column is numerical, but we would like to make it categorical using **binning**.
```
ic2_labels = ['Low', 'Moderate', 'High', 'Very High']
data['IC2_NEW'] = pd.cut(data['IC2'],4, labels=ic2_labels) # see: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.cut.html?highlight=cut#pandas.cut
data['IC2_NEW'].value_counts()
```
# Activity:
Use the column MDMAUD to reduce the number of categories to two (XXXX and other).
```
data["MDMAUD"]
```
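One possible sketch of a solution, using `apply` as above and keeping 'XXXX' as its own category:
```
# Collapse everything that is not 'XXXX' into a single 'other' category
data['MDMAUD'] = data['MDMAUD'].apply(lambda x: x if x == 'XXXX' else 'other')
data['MDMAUD'].value_counts()
```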
# Lesson 4 Key Concepts.
Regular expressions in Python.
[see here](https://docs.python.org/3/library/re.html)
[practice here](https://pythex.org/)
* `*`: matches the previous character 0 or more times.
* `+`: matches the previous character 1 or more times.
* `?`: matches the previous character 0 or 1 times (optional).
* `{}`: matches the previous character as many times as specified within the braces:
    * `{n}`: exactly n times.
    * `{n,}`: at least n times.
    * `{n,m}`: between n and m times.
```
import re
```
Let's see some examples.
```
text = "The complicit caat interacted with the other cats exactly as we expected."
pattern = "c*t"
print(re.findall(pattern, text))
text = "The complicit caat interacted with the other cats exactly as we expected."
pattern = 'c*a*t'
print(re.findall(pattern, text))
text = "The complicit caaaat ct interacted with the other cats exactly as we expected."
pattern = "a+"
print(re.findall(pattern, text))
text = "Is the correct spelling color or colour?"
pattern = "colou?r"
print(re.findall(pattern, text))
text = "We can match the following: aaaawwww, aww, awww, awwww, awwwww"
pattern = "aw{3}"
print(re.findall(pattern, text))
text = "Let's see how we can match the following: aaw, aaww, aawww, awwww, awwwww"
pattern = "aw{1,}"
print(re.findall(pattern, text))
pattern = "a{2,}w{2,}"
print(re.findall(pattern, text))
```
# Activity:
Create a function to automate the process of reducing the number of values of a categorical column.
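A possible sketch of such a helper, reusing the same idea applied to 'STATE' above (the default threshold is arbitrary):
```
def reduce_categories(df, column, min_count=2500, other_label='other'):
    """Collapse the values of `column` appearing fewer than `min_count` times into `other_label`."""
    counts = df[column].value_counts()
    rare = set(counts[counts < min_count].index)
    df[column] = df[column].apply(lambda x: other_label if x in rare else x)
    return df

# Example usage (hypothetical):
# data = reduce_categories(data, 'STATE', min_count=2500)
```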
[[source]](../api/alibi.explainers.counterfactual.rst)
# Counterfactual Instances
## Overview
A counterfactual explanation of an outcome or a situation $Y$ takes the form "If $X$ had not occurred, $Y$ would not have occurred" ([Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/counterfactual.html)). In the context of a machine learning classifier, $X$ would be an instance of interest and $Y$ would be the label predicted by the model. The task of finding a counterfactual explanation is then to find some $X^\prime$ that is in some way related to the original instance $X$ but leads to a different prediction $Y^\prime$. Reasoning in counterfactual terms is very natural for humans, e.g. asking what should have been done differently to achieve a different result. As a consequence, counterfactual instances for machine learning predictions are a promising method for human-interpretable explanations.
The counterfactual method described here is the most basic way of defining the problem of finding such $X^\prime$. Our algorithm loosely follows Wachter et al. (2017): [Counterfactual Explanations without Opening the Black Box: Automated Decisions and the GDPR](https://arxiv.org/abs/1711.00399). For an extension to the basic method which provides ways of finding higher quality counterfactual instances $X^\prime$ in a quicker time, please refer to [Counterfactuals Guided by Prototypes](CFProto.ipynb).
We can reason that the most basic requirements for a counterfactual $X^\prime$ are as follows:
- The predicted class of $X^\prime$ is different from the predicted class of $X$
- The difference between $X$ and $X^\prime$ should be human-interpretable.
While the first condition is straightforward, the second condition does not immediately lend itself to a mathematical formulation, as we first need to define "interpretability" in a mathematical sense. For this method we restrict ourselves to a particular definition by asserting that $X^\prime$ should be as close as possible to $X$ without violating the first condition. The main issue with this definition of "interpretability" is that the difference between $X^\prime$ and $X$ required to change the model prediction might be so small as to be un-interpretable to the human eye, in which case [we need a more sophisticated approach](CFProto.ipynb).
That being said, we can now cast the search for $X^\prime$ as a simple optimization problem with the following loss:
$$L = L_{\text{pred}} + \lambda L_{\text{dist}},$$
where the first loss term $L_{\text{pred}}$ guides the search towards points $X^\prime$ which would change the model prediction and the second term $\lambda L_{\text{dist}}$ ensures that $X^\prime$ is close to $X$. This form of loss has a single hyperparameter $\lambda$ weighing the contributions of the two competing terms.
The specific loss in our implementation is as follows:
$$L(X^\prime\vert X) = (f_t(X^\prime) - p_t)^2 + \lambda L_1(X^\prime, X).$$
Here $t$ is the desired target class for $X^\prime$ which can either be specified in advance or left up to the optimization algorithm to find, $p_t$ is the target probability of this class (typically $p_t=1$), $f_t$ is the model prediction on class $t$ and $L_1$ is the distance between the proposed counterfactual instance $X^\prime$ and the instance to be explained $X$. The use of the $L_1$ distance should ensure that the $X^\prime$ is a sparse counterfactual - minimizing the number of features to be changed in order to change the prediction.
The optimal value of the hyperparameter $\lambda$ will vary from dataset to dataset and even within a dataset for each instance to be explained and the desired target class. As such it is difficult to set and we learn it as part of the optimization algorithm, i.e. we want to optimize
$$\min_{X^{\prime}}\max_{\lambda}L(X^\prime\vert X)$$
subject to
$$\vert f_t(X^\prime)-p_t\vert\leq\epsilon \text{ (counterfactual constraint)},$$
where $\epsilon$ is a tolerance parameter. In practice this is done in two steps: on the first pass we sweep a broad range of $\lambda$, e.g. $\lambda\in(10^{-1},\dots,10^{-10})$, to find lower and upper bounds $\lambda_{\text{lb}}, \lambda_{\text{ub}}$ where counterfactuals exist. Then we use bisection to find the maximum $\lambda\in[\lambda_{\text{lb}}, \lambda_{\text{ub}}]$ such that the counterfactual constraint still holds. The result is a set of counterfactual instances $X^\prime$ with varying distance from the test instance $X$.
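The two-step search over $\lambda$ can be sketched roughly as follows; this is schematic pseudocode, not the library's actual implementation, and `satisfies_constraint` stands for running the inner optimization at a fixed $\lambda$ and checking the counterfactual constraint:
```python
def find_lambda_bounds(candidates, satisfies_constraint):
    # Sweep a broad range of lambda values and bracket the region where
    # counterfactuals satisfying |f_t(X') - p_t| <= eps exist.
    feasible = [lam for lam in candidates if satisfies_constraint(lam)]
    return min(feasible), max(feasible)

def bisect_lambda(lam_lb, lam_ub, satisfies_constraint, n_steps=10):
    # Find the largest lambda in [lam_lb, lam_ub] for which the
    # counterfactual constraint still holds.
    for _ in range(n_steps):
        lam = 0.5 * (lam_lb + lam_ub)
        if satisfies_constraint(lam):
            lam_lb = lam   # constraint holds, try a larger lambda
        else:
            lam_ub = lam   # constraint violated, shrink the upper bound
    return lam_lb
```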
## Usage
### Initialization
The counterfactual (CF) explainer method works on fully black-box models, meaning it can work with arbitrary functions that take arrays and return arrays. However, if the user has access to a full TensorFlow (TF) or Keras model, this can be passed in as well to take advantage of the automatic differentiation in TF to speed up the search. This section describes the initialization for a TF/Keras model; for fully black-box models refer to [numerical gradients](#Numerical-Gradients).
First we load the TF/Keras model:
```python
model = load_model('my_model.h5')
```
Then we can initialize the counterfactual object:
```python
shape = (1,) + x_train.shape[1:]
cf = CounterFactual(model, shape, distance_fn='l1', target_proba=1.0,
target_class='other', max_iter=1000, early_stop=50, lam_init=1e-1,
max_lam_steps=10, tol=0.05, learning_rate_init=0.1,
feature_range=(-1e10, 1e10), eps=0.01, init='identity',
decay=True, write_dir=None, debug=False)
```
Besides passing the model, we set a number of **hyperparameters** ...
... **general**:
* `shape`: shape of the instance to be explained, starting with batch dimension. Currently only single explanations are supported, so the batch dimension should be equal to 1.
* `feature_range`: global or feature-wise min and max values for the perturbed instance.
* `write_dir`: write directory for Tensorboard logging of the loss terms. It can be helpful when tuning the hyperparameters for your use case. It makes it easy to verify that e.g. not 1 loss term dominates the optimization, that the number of iterations is OK etc. You can access Tensorboard by running `tensorboard --logdir {write_dir}` in the terminal.
* `debug`: flag to enable/disable writing to Tensorboard.
... related to the **optimizer**:
* `max_iter`: number of loss optimization steps for each value of $\lambda$; the multiplier of the distance loss term.
* `learning_rate_init`: initial learning rate, follows linear decay.
* `decay`: flag to disable learning rate decay if desired
* `early_stop`: early stopping criterion for the search. If no counterfactuals are found for this many steps or if this many counterfactuals are found in a row we change $\lambda$ accordingly and continue the search.
* `init`: how to initialize the search, currently only `"identity"` is supported meaning the search starts from the original instance.
... related to the **objective function**:
* `distance_fn`: distance function between the test instance $X$ and the proposed counterfactual $X^\prime$, currently only `"l1"` is supported.
* `target_proba`: desired target probability for the returned counterfactual instance. Defaults to `1.0`, but it could be useful to reduce it to allow a looser definition of a counterfactual instance.
* `tol`: the tolerance within the `target_proba`, this works in tandem with `target_proba` to specify a range of acceptable predicted probability values for the counterfactual.
* `target_class`: desired target class for the returned counterfactual instance. Can be either an integer denoting the specific class membership or the string `other` which will find a counterfactual instance whose predicted class is anything other than the class of the test instance.
* `lam_init`: initial value of the hyperparameter $\lambda$. This is set to a high value $\lambda=10^{-1}$ and annealed during the search to find good bounds for $\lambda$ and for most applications should be fine to leave as default.
* `max_lam_steps`: the number of steps (outer loops) to search for with a different value of $\lambda$.
While the default values for the loss term coefficients worked well for the simple examples provided in the notebooks, it is recommended to test their robustness for your own applications.
<div class="alert alert-warning">
Warning
Once a `CounterFactual` instance is initialized, its parameters are frozen even if you create a new instance. This is due to TensorFlow behaviour, which holds on to some global state. In order to change the parameters of the explainer in the same session (e.g. for explaining different models), you will need to reset the TensorFlow graph manually:
```python
import tensorflow as tf
tf.keras.backend.clear_session()
```
You may need to reload your model after this. Then you can create a new `CounterFactual` instance with new parameters.
</div>
### Fit
The method is purely unsupervised so no fit method is necessary.
### Explanation
We can now explain the instance $X$:
```python
explanation = cf.explain(X)
```
The ```explain``` method returns a dictionary with the following *key: value* pairs:
* *cf*: dictionary containing the counterfactual instance found with the smallest distance to the test instance, it has the following keys:
* *X*: the counterfactual instance
* *distance*: distance to the original instance
* *lambda*: value of $\lambda$ corresponding to the counterfactual
* *index*: the step in the search procedure when the counterfactual was found
* *class*: predicted class of the counterfactual
* *proba*: predicted class probabilities of the counterfactual
* *loss*: counterfactual loss
* *orig_class*: predicted class of original instance
* *orig_proba*: predicted class probabilities of the original instance
* *all*: dictionary of all instances encountered during the search that satisfy the counterfactual constraint but have higher distance to the original instance than the returned counterfactual. This is organized by levels of $\lambda$, i.e. ```explanation['all'][0]``` will be a list of dictionaries corresponding to instances satisfying the counterfactual condition found in the first iteration over $\lambda$ during bisection.
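For example, the returned counterfactual could be inspected as follows (a small sketch based on the keys described above):
```python
cf_info = explanation['cf']
X_cf = cf_info['X']                                  # the counterfactual instance
print('Original class:      ', explanation['orig_class'])
print('Counterfactual class:', cf_info['class'])
print('Distance to original:', cf_info['distance'])
```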
### Numerical Gradients
So far, the whole optimization problem could be defined within the TF graph, making automatic differentiation possible. It is however possible that we do not have access to the model architecture and weights, and are only provided with a ```predict``` function returning probabilities for each class. The counterfactual can then be initialized in the same way as before, but using a prediction function:
```python
# define model
model = load_model('mnist_cnn.h5')
predict_fn = lambda x: model.predict(x)
# initialize explainer
shape = (1,) + x_train.shape[1:]
cf = CounterFactual(predict_fn, shape, distance_fn='l1', target_proba=1.0,
target_class='other', max_iter=1000, early_stop=50, lam_init=1e-1,
max_lam_steps=10, tol=0.05, learning_rate_init=0.1,
                    feature_range=(-1e10, 1e10), eps=0.01, init='identity')
```
In this case, we need to evaluate the gradients of the loss function with respect to the input features $X$ numerically:
\begin{equation*} \frac{\partial L_{\text{pred}}}{\partial X} = \frac{\partial L_\text{pred}}{\partial p} \frac{\partial p}{\partial X} \end{equation*}
where $L_\text{pred}$ is the predict function loss term, $p$ the predict function and $X$ the input features to optimize. There is now an additional hyperparameter to consider:
* `eps`: a float or an array of floats to define the perturbation size used to compute the numerical gradients $\partial p / \partial X$. If a single float, the same perturbation size is used for all features; if the array dimension is *(1 x nb of features)*, then a separate perturbation value can be used for each feature. For the Iris dataset, `eps` could look as follows:
```python
eps = np.array([[1e-2, 1e-2, 1e-2, 1e-2]]) # 4 features, also equivalent to eps=1e-2
```
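To illustrate, $\partial p / \partial X$ could be approximated with central differences along these lines (a schematic sketch, not the library's internal code, assuming a flat feature vector of shape *(1, n_features)*):
```python
import numpy as np

def numerical_gradient(predict_fn, X, eps=1e-2):
    """Central-difference approximation of dp/dX for a single instance X of
    shape (1, n_features); returns an array of shape (n_classes, n_features)."""
    n_features = X.shape[1]
    n_classes = predict_fn(X).shape[1]
    grad = np.zeros((n_classes, n_features))
    for i in range(n_features):
        X_plus, X_minus = X.copy(), X.copy()
        X_plus[0, i] += eps
        X_minus[0, i] -= eps
        grad[:, i] = (predict_fn(X_plus)[0] - predict_fn(X_minus)[0]) / (2 * eps)
    return grad
```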
## Examples
[Counterfactual instances on MNIST](../examples/cf_mnist.nblink)
```
from diff_classifier import features as ft
from diff_classifier import heatmaps as hm
from diff_register import im_process as imp
from diff_classifier import aws
import pandas as pd
import skimage.io as sio
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
import xml.etree.ElementTree as et
```
## Trajectory Data
```
prefix = '100nm_S2_XY1_1'
fname = 'features_{}.csv'.format(prefix)
features = pd.read_csv(fname, index_col='Unnamed: 0')
features.head()
rfolder = 'Tissue_Studies/04_23_18_Registration_Test/tracking'
aws.download_s3('{}/{}.tif'.format(rfolder, prefix), '{}.tif'.format(prefix), bucket_name='ccurtis.data')
trim = sio.imread('{}.tif'.format(prefix))
fig = plt.figure(figsize=(11, 11))
plt.imshow(trim[0, :, :]>0.1, vmin=0, vmax=2)
plt.axis('off')
hm.plot_scatterplot(prefix, feature='AR', vmin=0, vmax=2, upload=False)
```
## Cell Data
```
cprefix = 'S1_NUC'
cfile = '{}.tif'.format(cprefix)
cfolder = 'Tissue_Studies/04_23_18_Registration_Test'
#aws.download_s3('{}/{}'.format(cfolder, cfile), cfile, bucket_name='ccurtis.data')
cim = sio.imread(cfile)
cim.shape
ndim = 512
vpoint = (1500, 1600)
subim = cim[int(vpoint[0]-ndim/2):int(vpoint[0]+ndim/2), int(vpoint[1]-ndim/2):int(vpoint[1]+ndim/2)]
subim.shape
xmlfile = 'multipoints_S2.xml'
def read_xmlpoints(xmlfile, converttopix = True, umppx=0.62, offset=(17000, -1460)):
"""
Parameters
----------
xmlfile: XML file containing locations at which trajectory videos were collected.
converttopix: User indicates whether points should be converted to pixels within
the cell tilescan image, or remain in microns.
umppx: microns per pixel. Pixel density of cell tilescan image.
offset: (x, y) offset subtracted from each point before dividing by umppx to convert microns to pixels.
"""
tree = et.parse(xmlfile)
root = tree.getroot()
y = []
x = []
xmlpoints = []
counter = 0
for point in root[0]:
if counter > 1:
x = float(point[2].attrib['value'])
y = float(point[3].attrib['value'])
if converttopix:
xmlpoints.append(((x-offset[0])/umppx,(y-offset[1])/umppx))
else:
xmlpoints.append((x, y))
counter = counter + 1
return xmlpoints
xmlpoints = read_xmlpoints(xmlfile, converttopix=False)
def crop_to_videodims(cell_image, multichannel = False, vidpoint=(600, 600), defaultdims=True, dim=512, save=True,
fname='test.tif'):
if defaultdims:
ndim = 512
else:
ndim = dim
if not multichannel:
subim = cell_image[int(vidpoint[0]-ndim/2):int(vidpoint[0]+ndim/2), int(vidpoint[1]-ndim/2):int(vidpoint[1]+ndim/2)]
if save:
sio.imsave(fname, subim)
return subim
prefix = 'DAPI'
cell_orig = '{}.tif'.format(prefix)
cell_bin = 'clean_{}.png'.format(prefix)
cell_cut = 'short_{}'.format(cell_bin)
call_skel = 'skel_{}'.format(cell_cut)
subim = crop_to_videodims(cim, vidpoint=(10000, 12000), defaultdims=False, dim=994, fname=cell_orig)
fig = plt.figure(figsize=(5, 5))
plt.imshow(subim)
plt.axis('off')
skeleton0, branch_data_short, nbranches, short_image, props = imp.skeleton_image('.', cell_orig, threshold=30, area_thresh=160, figsize=(5, 5), show=True, multichannel=False, channel=0,
disp_binary = True, default_name=True)
features = imp.mglia_features(props, branch_data_short, convert=True, umppx=0.33)
features
```
```
'''
1. Load the dataset
'''
# Import tensorflow.keras
from tensorflow import keras
# Load the Fashion-MNIST dataset
(x_train, t_train), (x_test, t_test) = keras.datasets.fashion_mnist.load_data()
'''
2. Convert the (28, 28) images into vectors of length 784 and normalize them
'''
# Convert the (60000, 28, 28) training data into a (60000, 784) rank-2 tensor
tr_x = x_train.reshape(-1, 784)
# Convert the training data to float32 and scale it by dividing by 255
tr_x = tr_x.astype('float32') / 255
# Convert the (10000, 28, 28) test data into a (10000, 784) rank-2 tensor
ts_x = x_test.reshape(-1, 784)
# Convert the test data to float32 and scale it by dividing by 255
ts_x = ts_x.astype('float32') / 255
'''
3. One-hot encode the labels
'''
# Import tensorflow.keras
from tensorflow import keras
# Number of classes
class_num = 10
# Convert the training labels to one-hot representation
tr_t = keras.utils.to_categorical(t_train, class_num)
# Convert the test labels to one-hot representation
ts_t = keras.utils.to_categorical(t_test, class_num)
'''
4. Build the model
'''
class MLP(keras.Model):
    '''Multi-layer perceptron
    Attributes:
        l1(Dense): hidden layer
        l2(Dense): output layer
    '''
    def __init__(self, hidden_dim, output_dim):
        '''
        Parameters:
            hidden_dim(int): number of units (dimensions) in the hidden layer
            output_dim(int): number of units (dimensions) in the output layer
        '''
        super().__init__()
        # Hidden layer: ReLU activation
        self.l1 = keras.layers.Dense(hidden_dim, activation='relu')
        # Output layer: softmax activation
        self.l2 = keras.layers.Dense(output_dim, activation='softmax')
    def call(self, x):
        '''Function called on the MLP instance
        Parameters: x(ndarray(float32)): training or validation data
        Returns(float32): the MLP output, a rank-1 tensor with 10 elements per sample
        '''
        h = self.l1(x) # output of the hidden layer
        y = self.l2(h) # output of the output layer
        return y
'''
5. Define the loss function
'''
# Object that computes the cross-entropy error for multiclass classification
cce = keras.losses.CategoricalCrossentropy()
def loss(t, y):
    '''Loss function
    Parameters: t(ndarray(float32)): ground-truth labels
                y(ndarray(float32)): predictions
    Returns: cross-entropy error
    '''
    return cce(t, y)
'''
6. Parameter updates with a gradient descent algorithm
'''
import tensorflow as tf
# Create the optimizer that performs gradient descent
optimizer = keras.optimizers.Adamax(learning_rate=0.002)
# Create an object that records the loss
train_loss = keras.metrics.Mean()
# Create an object that records the categorical accuracy
train_acc = keras.metrics.CategoricalAccuracy()
def train_step(x, t):
    '''Perform one training step
    Parameters: x(ndarray(float32)): training data
                t(ndarray(float32)): ground-truth labels
    Returns:
        cross-entropy error of this step
    '''
    # Block that records operations for automatic differentiation
    with tf.GradientTape() as tape:
        # Feed the input to the model and get the forward-pass outputs
        outputs = model(x)
        # Error between the outputs and the ground-truth labels
        tmp_loss = loss(t, outputs)
    # Compute the gradient of the error from the operations recorded on the tape
    grads = tape.gradient(
        # error of the current step
        tmp_loss,
        # list of biases and weights
        model.trainable_variables)
    # Apply the gradient descent update rule to the biases and weights
    optimizer.apply_gradients(zip(grads,
                                  model.trainable_variables))
    # Record the loss in the Mean object
    train_loss(tmp_loss)
    # Record the accuracy in the CategoricalAccuracy object
    train_acc(t, outputs)
    return tmp_loss
'''
7.訓練データと検証データの用意
'''
from sklearn.model_selection import train_test_split
# 訓練データと検証データに8:2の割合で分割 \は行継続文字
x_train, x_validation, t_train, t_validation = \
train_test_split(tr_x, tr_t, test_size=0.2)
%%time
'''
8.モデルを生成して学習する
'''
from sklearn.utils import shuffle
# エポック数
epochs = 100
# ミニバッチのサイズ
batch_size = 64
#ステップ数
steps = x_train.shape[0] // batch_size
# 隠れ層256ユニット、出力層10ユニットのモデルを生成
model = MLP(256, 10)
# 学習を行う
for epoch in range(epochs):
# 訓練データと正解ラベルをシャッフル
x_, t_ = shuffle(x_train, t_train, random_state=1)
# 1ステップにおけるミニバッチを使用した学習
for step in range(steps):
start = step * batch_size # ミニバッチの先頭インデックス
end = start + batch_size # ミニバッチの末尾のインデックス
# ミニバッチでバイアス、重みを更新して誤差を取得
tmp_loss = train_step(x_[start:end], t_[start:end])
# 1エポックごとに結果を出力
if (epoch + 1) % 10 == 0:
print('epoch({}) train_loss: {:.4} train_acc: {:.4}'.format(
epoch+1,
train_loss.result(), # 現在の損失を出力
train_acc.result() # 現在の精度を出力
))
'''
9. 検証データによるモデルの評価
'''
# 検証データの予測値を取得
val_preds = model(x_validation)
# カテゴリカルデータの精度を取得するオブジェクト
categor_acc = tf.keras.metrics.CategoricalAccuracy()
# 精度を測定するデータを設定
categor_acc.update_state(t_validation, val_preds)
# 検証データの精度を取得
validation_acc = categor_acc.result().numpy()
# 検証データの損失を取得
validation_loss = loss(t_validation, val_preds)
print('validation_loss: {:.4f}, validation_acc: {:.4f}'.format(
validation_loss,
validation_acc
))
'''
10. テストデータによるモデルの評価
'''
# テストデータの予測値を取得
test_preds = model(ts_x)
# カテゴリカルデータの精度を取得するオブジェクト
categor_acc = tf.keras.metrics.CategoricalAccuracy()
# 精度を測定するデータを設定
categor_acc.update_state(ts_t, test_preds)
# テストデータの精度を取得
test_acc = categor_acc.result().numpy()
# テストデータの損失を取得
test_loss = loss(ts_t, test_preds)
print('test_loss: {:.4f}, test_acc: {:.4f}'.format(
test_loss,
test_acc
))
```
|
github_jupyter
|
'''
1. データセットの読み込み
'''
# tensorflow.keras のインポート
from tensorflow import keras
# Fashion-MNISTデータセットの読み込み
(x_train, t_train), (x_test, t_test) = keras.datasets.fashion_mnist.load_data()
'''
2. (28,28)の画像データを(784)のベクトルに変換して正規化を行う
'''
# (60000, 28, 28)の訓練データを(60000, 784)の2階テンソルに変換
tr_x = x_train.reshape(-1, 784)
# 訓練データをfloat32(浮動小数点数)型に、255で割ってスケール変換する
tr_x = tr_x.astype('float32') / 255
# (10000, 28, 28)のテストデータを(10000, 784)の2階テンソルに変換
ts_x = x_test.reshape(-1, 784)
# テストデータをfloat32(浮動小数点数)型に、255で割ってスケール変換する
ts_x = ts_x.astype('float32') / 255
'''
3. 正解ラベルのOne-Hotエンコーディング
'''
# tensorflow.keras のインポート
from tensorflow import keras
# クラスの数
class_num = 10
# 訓練データの正解ラベルをOne-Hot表現に変換
tr_t = keras.utils.to_categorical(t_train, class_num)
# テストデータの正解ラベルをOne-Hot表現に変換
ts_t = keras.utils.to_categorical(t_test, class_num)
'''
4.モデルの作成
'''
class MLP(keras.Model):
'''多層パーセプトロン
Attributes:
l1(Dense): 隠れ層
l2(Dense): 出力層
'''
def __init__(self, hidden_dim, output_dim):
'''
Parameters:
hidden_dim(int): 隠れ層のユニット数(次元)
output_dim(int): 出力層のユニット数(次元)
'''
super().__init__()
# 隠れ層:活性化関数はReLU
self.l1 = keras.layers.Dense(hidden_dim, activation='relu')
# 出力層:活性化関数はソフトマックス
self.l2 = keras.layers.Dense(output_dim, activation='softmax')
def call(self, x):
'''MLPのインスタンスからコールバックされる関数
Parameters: x(ndarray(float32)):訓練データ、または検証データ
Returns(float32): MLPの出力として要素数3の1階テンソル
'''
h = self.l1(x) # 第1層の出力
y = self.l2(h) # 出力層の出力
return y
'''
5.損失関数の定義
'''
# マルチクラス分類のクロスエントロピー誤差を求めるオブジェクト
cce = keras.losses.CategoricalCrossentropy()
def loss(t, y):
'''損失関数
Parameters: t(ndarray(float32)):正解ラベル
y(ndarray(float32)):予測値
Returns: クロスエントロピー誤差
'''
return cce(t, y)
'''
6.勾配降下アルゴリズムによるパラメーターの更新処理
'''
import tensorflow as tf
# 勾配降下アルゴリズムを使用するオプティマイザーを生成
optimizer = keras.optimizers.Adamax(learning_rate=0.002)
# 損失を記録するオブジェクトを生成
train_loss = keras.metrics.Mean()
# カテゴリカルデータの精度を記録するオブジェクトを生成
train_acc = keras.metrics.CategoricalAccuracy()
def train_step(x, t):
'''学習を1回行う
Parameters: x(ndarray(float32)):訓練データ
t(ndarray(float32)):正解ラベル
Returns:
ステップごとのクロスエントロピー誤差
'''
# 自動微分による勾配計算を記録するブロック
with tf.GradientTape() as tape:
# モデルに入力して順伝搬の出力値を取得
outputs = model(x)
# 出力値と正解ラベルの誤差
tmp_loss = loss(t, outputs)
# tapeに記録された操作を使用して誤差の勾配を計算
grads = tape.gradient(
# 現在のステップの誤差
tmp_loss,
# バイアス、重みのリストを取得
model.trainable_variables)
# 勾配降下法の更新式を適用してバイアス、重みを更新
optimizer.apply_gradients(zip(grads,
model.trainable_variables))
# 損失をMeanオブジェクトに記録
train_loss(tmp_loss)
# 精度をCategoricalAccuracyオブジェクトに記録
train_acc(t, outputs)
return tmp_loss
'''
7.訓練データと検証データの用意
'''
from sklearn.model_selection import train_test_split
# 訓練データと検証データに8:2の割合で分割 \は行継続文字
x_train, x_validation, t_train, t_validation = \
train_test_split(tr_x, tr_t, test_size=0.2)
%%time
'''
8.モデルを生成して学習する
'''
from sklearn.utils import shuffle
# エポック数
epochs = 100
# ミニバッチのサイズ
batch_size = 64
#ステップ数
steps = x_train.shape[0] // batch_size
# 隠れ層256ユニット、出力層10ユニットのモデルを生成
model = MLP(256, 10)
# 学習を行う
for epoch in range(epochs):
# 訓練データと正解ラベルをシャッフル
x_, t_ = shuffle(x_train, t_train, random_state=1)
# 1ステップにおけるミニバッチを使用した学習
for step in range(steps):
start = step * batch_size # ミニバッチの先頭インデックス
end = start + batch_size # ミニバッチの末尾のインデックス
# ミニバッチでバイアス、重みを更新して誤差を取得
tmp_loss = train_step(x_[start:end], t_[start:end])
# 1エポックごとに結果を出力
if (epoch + 1) % 10 == 0:
print('epoch({}) train_loss: {:.4} train_acc: {:.4}'.format(
epoch+1,
train_loss.result(), # 現在の損失を出力
train_acc.result() # 現在の精度を出力
))
'''
9. 検証データによるモデルの評価
'''
# 検証データの予測値を取得
val_preds = model(x_validation)
# カテゴリカルデータの精度を取得するオブジェクト
categor_acc = tf.keras.metrics.CategoricalAccuracy()
# 精度を測定するデータを設定
categor_acc.update_state(t_validation, val_preds)
# 検証データの精度を取得
validation_acc = categor_acc.result().numpy()
# 検証データの損失を取得
validation_loss = loss(t_validation, val_preds)
print('validation_loss: {:.4f}, validation_acc: {:.4f}'.format(
validation_loss,
validation_acc
))
'''
10. テストデータによるモデルの評価
'''
# テストデータの予測値を取得
test_preds = model(ts_x)
# カテゴリカルデータの精度を取得するオブジェクト
categor_acc = tf.keras.metrics.CategoricalAccuracy()
# 精度を測定するデータを設定
categor_acc.update_state(ts_t, test_preds)
# テストデータの精度を取得
test_acc = categor_acc.result().numpy()
# テストデータの損失を取得
test_loss = loss(ts_t, test_preds)
print('test_loss: {:.4f}, test_acc: {:.4f}'.format(
test_loss,
test_acc
))
| 0.557364 | 0.804828 |
```
import pandas as pd
import matplotlib.pyplot as plt
ride_data = pd.read_csv("raw_data/ride_data.csv")
city_data = pd.read_csv("raw_data/city_data.csv")
ride_df = pd.DataFrame({
"Average Fare ($) Per City": round(ride_data.groupby(["city"]).mean()["fare"],2),
"Total Number of Rides Per City": ride_data.groupby(["city"]).count()["ride_id"]
})
ride_df_reset = ride_df.reset_index()
city_df_reset = city_data.reset_index()
ride_df_reset.head()
city_data.head()
merged_df = pd.merge(ride_df_reset,city_df_reset,on="city",how="left")
merged_df.head()
# x-axis = Total Number of Rides (Per City)
# y-axis = Average Fare ($)
# urban - red
urban_x = merged_df.loc[merged_df["type"] == "Urban"]["Total Number of Rides Per City"]
urban_y = merged_df.loc[merged_df["type"] == "Urban"]["Average Fare ($) Per City"]
urban_size = merged_df.loc[merged_df["type"] == "Urban"]["driver_count"]
# # suburban - blue
suburban_x = merged_df.loc[merged_df["type"] == "Suburban"]["Total Number of Rides Per City"]
suburban_y = merged_df.loc[merged_df["type"] == "Suburban"]["Average Fare ($) Per City"]
suburban_size = merged_df.loc[merged_df["type"] == "Suburban"]["driver_count"]
# # rural - yellow
rural_x = merged_df.loc[merged_df["type"] == "Rural"]["Total Number of Rides Per City"]
rural_y = merged_df.loc[merged_df["type"] == "Rural"]["Average Fare ($) Per City"]
rural_size = merged_df.loc[merged_df["type"] == "Rural"]["driver_count"]
# Graph
# plt.figure(figsize=(10,10))
plt.scatter(urban_x,urban_y,label="Urban",s=10*urban_size,c="red",edgecolor="black",alpha=.8)
plt.scatter(suburban_x,suburban_y,label="Suburban",s=10*suburban_size,c="blue", edgecolor="black",alpha=.8)
plt.scatter(rural_x,rural_y,label="Rural",s=10*rural_size,c="yellow",edgecolor="black",alpha=.8)
plt.title("Pyber Ride Sharing Data (2016)")
plt.xlabel("Total Number of Rides (Per City)")
plt.ylabel("Average Fare($)")
plt.legend(loc="best")
plt.grid()
plt.show()
pie_data.head()
fares_by_city_pct
pie_data = pd.merge(ride_data,city_data,on="city",how="left")
# % of Total Fares by City Type
fares_by_city_pct = pie_data.groupby(["type"]).sum()["fare"]/pie_data.sum()["fare"]
# % of Total Rides by City Type
rides_by_city_pct = pie_data.groupby(["type"]).count()["driver_count"]/len(pie_data)
# % of Total Drivers by City Type
drivers_by_city_pct = pie_data.groupby(["type"]).sum()["driver_count"]/pie_data.sum()["driver_count"]
drivers_by_city_pct
# percet_by_type = merged_df.groupby("type").sum()["fare"]/merged_df["fare"].sum()
plt.pie(fares_by_city_pct,
labels= ["Rural","Suburban","Urban"],
colors= ["gold","skyblue","coral"],
autopct= "%1.1f%%",
explode= [0,0,0.1],
shadow= True,
startangle= 150,)
plt.title("% of Fares by City Type")
plt.show()
plt.pie(rides_by_city_pct,
labels= ["Rural","Suburban","Urban"],
colors= ["gold","skyblue","coral"],
autopct= "%1.1f%%",
explode= [0,0,0.1],
shadow= True,
startangle= 150,)
plt.title("% of Fares by City Type")
plt.show()
plt.pie(drivers_by_city_pct,
labels= ["Rural","Suburban","Urban"],
colors= ["gold","skyblue","coral"],
autopct= "%1.1f%%",
explode= [0,0,0.1],
shadow= True,
startangle= 150,)
plt.title("% of Fares by City Type")
plt.show()
```
## Observed Trend:
1. Although urban rides are the highest in volume, they are among the cheapest fares.
2. Urban fares tend to be cheaper, but urban cities own most of the market share by ride volume.
3. There appears to be a negative relationship between price and volume.
|
github_jupyter
|
import pandas as pd
import matplotlib.pyplot as plt
ride_data = pd.read_csv("raw_data/ride_data.csv")
city_data = pd.read_csv("raw_data/city_data.csv")
ride_df = pd.DataFrame({
"Average Fare ($) Per City": round(ride_data.groupby(["city"]).mean()["fare"],2),
"Total Number of Rides Per City": ride_data.groupby(["city"]).count()["ride_id"]
})
ride_df_reset = ride_df.reset_index()
city_df_reset = city_data.reset_index()
ride_df_reset.head()
city_data.head()
merged_df = pd.merge(ride_df_reset,city_df_reset,on="city",how="left")
merged_df.head()
# x-axis = Total Number of Rides (Per City)
# y-axis = Average Fare ($)
# urban - red
urban_x = merged_df.loc[merged_df["type"] == "Urban"]["Total Number of Rides Per City"]
urban_y = merged_df.loc[merged_df["type"] == "Urban"]["Average Fare ($) Per City"]
urban_size = merged_df.loc[merged_df["type"] == "Urban"]["driver_count"]
# # suburban - blue
suburban_x = merged_df.loc[merged_df["type"] == "Suburban"]["Total Number of Rides Per City"]
suburban_y = merged_df.loc[merged_df["type"] == "Suburban"]["Average Fare ($) Per City"]
suburban_size = merged_df.loc[merged_df["type"] == "Suburban"]["driver_count"]
# # rural - yellow
rural_x = merged_df.loc[merged_df["type"] == "Rural"]["Total Number of Rides Per City"]
rural_y = merged_df.loc[merged_df["type"] == "Rural"]["Average Fare ($) Per City"]
rural_size = merged_df.loc[merged_df["type"] == "Rural"]["driver_count"]
# Graph
# plt.figure(figsize=(10,10))
plt.scatter(urban_x,urban_y,label="Urban",s=10*urban_size,c="red",edgecolor="black",alpha=.8)
plt.scatter(suburban_x,suburban_y,label="Suburban",s=10*suburban_size,c="blue", edgecolor="black",alpha=.8)
plt.scatter(rural_x,rural_y,label="Rural",s=10*rural_size,c="yellow",edgecolor="black",alpha=.8)
plt.title("Pyber Ride Sharing Data (2016)")
plt.xlabel("Total Number of Rides (Per City)")
plt.ylabel("Average Fare($)")
plt.legend(loc="best")
plt.grid()
plt.show()
pie_data.head()
fares_by_city_pct
pie_data = pd.merge(ride_data,city_data,on="city",how="left")
# % of Total Fares by City Type
fares_by_city_pct = pie_data.groupby(["type"]).sum()["fare"]/pie_data.sum()["fare"]
# % of Total Rides by City Type
rides_by_city_pct = pie_data.groupby(["type"]).count()["driver_count"]/len(pie_data)
# % of Total Drivers by City Type
drivers_by_city_pct = pie_data.groupby(["type"]).sum()["driver_count"]/pie_data.sum()["driver_count"]
drivers_by_city_pct
# percet_by_type = merged_df.groupby("type").sum()["fare"]/merged_df["fare"].sum()
plt.pie(fares_by_city_pct,
labels= ["Rural","Suburban","Urban"],
colors= ["gold","skyblue","coral"],
autopct= "%1.1f%%",
explode= [0,0,0.1],
shadow= True,
startangle= 150,)
plt.title("% of Fares by City Type")
plt.show()
plt.pie(rides_by_city_pct,
labels= ["Rural","Suburban","Urban"],
colors= ["gold","skyblue","coral"],
autopct= "%1.1f%%",
explode= [0,0,0.1],
shadow= True,
startangle= 150,)
plt.title("% of Fares by City Type")
plt.show()
plt.pie(drivers_by_city_pct,
labels= ["Rural","Suburban","Urban"],
colors= ["gold","skyblue","coral"],
autopct= "%1.1f%%",
explode= [0,0,0.1],
shadow= True,
startangle= 150,)
plt.title("% of Fares by City Type")
plt.show()
| 0.32306 | 0.219735 |
# Tutorial 6: Network
## Overview
In this tutorial we are going to cover:
* [`Network` Scope](#t06network)
* [`TensorOp` and its Children](#t06tensorop)
* [How to Customize a `TensorOp`](#t06customize)
* [TensorFlow](#t06tf)
* [PyTorch](#t06torch)
* [fe.backend](#t06backend)
* [Related Apphub Examples](#t06apphub)
<a id='t06network'></a>
## Network Scope
`Network` is one of the three main FastEstimator APIs. It defines not only a neural network model but also all of the operations to be performed on it. This can include the deep-learning model itself, loss calculations, model updating rules, and any other functionality that you wish to execute within a GPU.
Here we show two `Network` example graphs to enhance the concept:
<img src="../resources/t06_network_example.png" alt="drawing" width="1000"/>
As the figure shows, models (orange) are only one piece of a `Network`. It also includes other operations such as loss computation (blue) and update rules (green) that will be used during the training process.
<a id='t06tensorop'></a>
## TensorOp and its Children
A `Network` is composed of basic units called `TensorOps`. All of the building blocks inside a `Network` should derive from the `TensorOp` base class. A `TensorOp` is a kind of `Op` and therefore follows the same rules described in [Tutorial 3](./t03_operator.ipynb).
<img src="../resources/t06_tensorop_class.png" alt="drawing" width="500"/>
There are some common `TensorOp` classes we would like to specially mention because of their prevalence:
### ModelOp
Any model instance created from `fe.build` (see [Tutorial 5](./t05_model.ipynb)) needs to be packaged as a `ModelOp` such that it can interact with other components inside the `Network` API. The orange blocks in the first figure are `ModelOps`.
### UpdateOp
FastEstimator uses `UpdateOp` to associate the model with its loss. Unlike other `Ops` that use `inputs` and `outputs` to express their connections, `UpdateOp` uses the arguments `loss` and `model` instead. The green blocks in the first figure are `UpdateOps`.
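As a rough sketch of how these pieces typically fit together (the model function, key names, and loss op below are illustrative choices, not prescribed by this tutorial):
```python
import fastestimator as fe
from fastestimator.architecture.tensorflow import LeNet
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp

# build a model instance as in Tutorial 5 (the actual architecture is incidental here)
model = fe.build(model_fn=LeNet, optimizer_fn="adam")

# wire the forward pass (ModelOp), a loss TensorOp, and the UpdateOp together
network = fe.Network(ops=[
    ModelOp(model=model, inputs="x", outputs="y_pred"),   # orange block: run the model
    CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),   # blue block: compute the loss
    UpdateOp(model=model, loss_name="ce")                 # green block: apply gradients for that loss
])
```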
### Others (loss, gradient, meta, etc.)
There are many ready-to-use `TensorOps` that users can directly import from `fe.op.tensorop`. Some examples include loss and gradient computation ops. There is also a category of `TensorOp` called `MetaOp`, which takes other Ops as input and generates more complex execution graphs (see [Advanced Tutorial 9](../advanced/t09_meta_ops.ipynb)).
For all available Ops please check out the FastEstimator API.
<a id='t06customize'></a>
## Customize a TensorOp
FastEstimator provides flexibility that allows users to customize their own `TensorOp`s by wrapping TensorFlow or PyTorch library calls, or by leveraging `fe.backend` API functions. Users only need to inherit the `TensorOp` class and overwrite its `forward` function.
If you want to customize a `TensorOp` by directly leveraging API calls from TensorFlow or PyTorch, **please make sure that all of the `TensorOp`s in the `Network` are backend-consistent**. In other words, you cannot have `TensorOp`s built specifically for TensorFlow and PyTorch in the same `Network`. Note that the `ModelOp` backend is determined by which library the model function uses, and so must be consistent with any custom `TensorOp` that you write.
Here we are going to demonstrate how to build a `TensorOp` that takes high-dimensional inputs and returns an average scalar value. For a more advanced tutorial on customizing a `TensorOp` please check out [Advanced Tutorial 3](../advanced/t03_operator.ipynb).
<a id='t06tf'></a>
### Example Using TensorFlow
```
from fastestimator.op.tensorop import TensorOp
import tensorflow as tf
class ReduceMean(TensorOp):
def forward(self, data, state):
return tf.reduce_mean(data)
```
<a id='t06torch'></a>
### Example Using PyTorch
```
from fastestimator.op.tensorop import TensorOp
import torch
class ReduceMean(TensorOp):
def forward(self, data, state):
return torch.mean(data)
```
<a id='t06backend'></a>
### Example Using `fe.backend`
You don't need to worry about backend consistency if you import a FastEstimator-provided `TensorOp`, or customize your `TensorOp` using the `fe.backend` API. FastEstimator auto-magically handles everything for you.
```
from fastestimator.op.tensorop import TensorOp
from fastestimator.backend import reduce_mean
class ReduceMean(TensorOp):
def forward(self, data, state):
return reduce_mean(data)
```
<a id='t06apphub'></a>
## Apphub Examples
You can find some practical examples of the concepts described here in the following FastEstimator Apphubs:
* [Fast Style Transfer](../../apphub/style_transfer/fst_coco/fst.ipynb)
* [DC-GAN](../../apphub/image_generation/dcgan/dcgan.ipynb)
|
github_jupyter
|
from fastestimator.op.tensorop import TensorOp
import tensorflow as tf
class ReduceMean(TensorOp):
def forward(self, data, state):
return tf.reduce_mean(data)
from fastestimator.op.tensorop import TensorOp
import torch
class ReduceMean(TensorOp):
def forward(self, data, state):
return torch.mean(data)
from fastestimator.op.tensorop import TensorOp
from fastestimator.backend import reduce_mean
class ReduceMean(TensorOp):
def forward(self, data, state):
return reduce_mean(data)
| 0.836755 | 0.992885 |
# Session 2: The Vector Space Model
This exercise page explains information retrieval models based on the vector space model. Specifically, it describes how to convert documents into feature vectors, how to compute TF-IDF, and how to rank documents by cosine similarity, with example implementations. The final goal of this second exercise is to be able to implement document ranking with TF-IDF-weighted feature vectors over a given document corpus.
## Libraries
This exercise uses the following libraries.
- [numpy, scipy](http://www.numpy.org/)
+ Fundamental libraries for scientific computing in Python.
- [gensim](https://radimrehurek.com/gensim/index.html)
+ A Python library that makes it easy to use topic modeling (LDA), word2vec, and more.
- [nltk (natural language toolkit)](http://www.nltk.org/)
+ A Python library for natural language processing. In this exercise it is used for stopwords. It also provides many other NLP methods, such as word stemming and tokenization, part-of-speech tagging, and dependency parsing.
- [pandas](http://pandas.pydata.org/)
+ A data analysis framework for Python. In this exercise it is used to plot data.
## Contents of this exercise
A file named `sample.corpus` is placed under ``h29iro/data/``. It contains three short documents separated by newlines. In this exercise we build TF-IDF-weighted feature vectors from this file and rank the documents by cosine similarity.
## 1. Loading and tokenizing the documents
First, we load `sample.corpus` and extract the bag-of-words (BoW) representation of each document.
```
import numpy as np
import gensim
from nltk.corpus import stopwords
import pandas as pd
np.set_printoptions(precision=4)
# 小数点3ケタまで表示
%precision 3
with open("../data/sample.corpus", "r") as f: #sample.corpusの読み込み
text = f.read().strip().split("\n") #sample.corpusのテキストデータを取得し,それを改行で分割
text
```
We can see that there are three documents. Next, we split each document into tokens (words). For simplicity, we simply split on whitespace.
```
raw_corpus = [d.lower().split() for d in text] #文章を小文字に変換して単語に分割する
print("d1=" , raw_corpus[0])
print("d2=" , raw_corpus[1])
print("d3=" , raw_corpus[2])
```
The sentences have been converted into sets of words. However, these word sets still contain stopwords such as "i" and "of", so let's remove them.
Stopword lists of many kinds can be found online. Here, we use nltk's stopwords module.
```
# stopwords.words("english")に含まれていない単語のみ抽出
corpus = [list(filter(lambda word: word not in stopwords.words("english"), x)) for x in raw_corpus]
print("d1=" , corpus[0])
print("d2=" , corpus[1])
print("d3=" , corpus[2])
```
## 2. Building the feature vectors
Next, we build a feature vector for each document. The flow from here is as follows:
1. Build a dictionary mapping word -> word ID from the document collection (corpus).
2. Using that dictionary, represent each document as a set of (word ID, term count) pairs (id_corpus).
3. From id_corpus, build TF-IDF-weighted feature vectors using TfidfModel.
First, we build the word -> word ID dictionary from the document collection (corpus).
```
dictionary = gensim.corpora.Dictionary(corpus) #コーパスを与えて,単語->IDの辞書を作成する
dictionary.token2id #作成された辞書の中身
```
Using this dictionary, we convert the words in each document into IDs.
```
id_corpus = [dictionary.doc2bow(document) for document in corpus]
id_corpus
```
In the resulting id_corpus, the first document, for example, looks like this:
```
id_corpus[0]
```
For example, the entry (0, 2) means:
```
the word with ID 0 appears 2 times
```
In other words, at this point the documents have been vectorized using only term frequency. If you want to extract this as a numpy vector, use the corpus2dense method.
```
tf_vectors = gensim.matutils.corpus2dense(id_corpus, len(dictionary)).T
print("d1=", tf_vectors[0])
print("d2=", tf_vectors[1])
print("d3=", tf_vectors[2])
```
The corpus prepared here has a vocabulary of only 8 words, but you can easily imagine that in real cases these feature vectors become extremely sparse.
Now, to obtain TF-IDF-weighted feature vectors from id_corpus, we use the models.TfidfModel class.
```
tfidf_model = gensim.models.TfidfModel(id_corpus, normalize=False) #normalize=Trueにすると,文書長によってtfを正規化する
tfidf_corpus = tfidf_model[id_corpus] #id_corpusをtfidfで重み付けされたものに変換
```
We now have TF-IDF-weighted feature vectors. For example, let's look at the contents of the feature vector ${\mathbf d}_1$ for the first document $d_1$.
```
tfidf_corpus[0]
```
The TF-IDF values are given as (word ID, weight) pairs. To convert the word IDs back into actual words, pass them through the dictionary.
```
[(dictionary[x[0]], x[1]) for x in tfidf_corpus[0]]#dictionary[token_id]でアクセスすると実際の単語が返ってくる
```
Let's look at the second document $d_2$ in the same way.
```
doc2 = [(dictionary[x[0]], x[1]) for x in tfidf_corpus[1]]
doc2
```
For example, let's verify that the TF-IDF value of `japan` in document $d_{2}$ is really correct.
$tfidf_{d_2, japan} = tf_{d_2, japan} \log_2 \frac{N}{df_{japan}}$
Here, $tf_{d_2, japan} = 2$, $N = 3$, and $df_{japan} = 1$, so
$tfidf_{d_2, japan} = 2 \log_2 3 = 3.170$
which matches the result obtained with gensim (gensim's TfidfModel uses a base-2 logarithm by default).
```
import math
2*math.log2(3) #2log3の計算方法
```
# 3. Cosine similarity
Now let's rank documents by cosine similarity.
Before measuring the similarity between a query and the documents, let's first compute the cosine similarity between documents. We could compute it with gensim, but here we extract numpy vectors and compute the cosine similarity on those vectors directly.
```
# 各文書のtfidfベクトルを取得
tfidf_vectors = gensim.matutils.corpus2dense(tfidf_corpus, len(dictionary)).T
print ("d1=", tfidf_vectors[0])
print ("d2=", tfidf_vectors[1])
print ("d3=", tfidf_vectors[2])
# コサイン類似度を計算する関数を用意
from scipy.spatial.distance import cosine
def cosine_sim(v1, v2):
#scipyのcosineは類似度ではなく距離関数のため, 1-コサイン距離 とすることで,コサイン類似度に変換する
return 1.0 - cosine(v1, v2)
# 各文書間のコサイン類似度を計算してみる
print ("sim(d1, d2)=", cosine_sim(tfidf_vectors[0], tfidf_vectors[1]))
print ("sim(d2, d3)=", cosine_sim(tfidf_vectors[1], tfidf_vectors[2]))
print ("sim(d1, d3)=", cosine_sim(tfidf_vectors[0], tfidf_vectors[2]))
```
Now let's convert a query into a feature vector and compute the cosine similarity between the query and each document.
```
q = {"kansai", "japan"}
tfidf_q = tfidf_model[dictionary.doc2bow(q)] #クエリをtfidfベクトルに変換
query_vector = gensim.matutils.corpus2dense([tfidf_q], len(dictionary)).T[0] #numpyのベクトルに変換
print ("q=", query_vector)
print([(dictionary[x[0]], x[1]) for x in tfidf_q])
print ("sim(q, d1) = ", cosine_sim(query_vector, tfidf_vectors[0]))
print ("sim(q, d2) = ", cosine_sim(query_vector, tfidf_vectors[1]))
print ("sim(q, d3) = ", cosine_sim(query_vector, tfidf_vectors[2]))
```
This result shows that for the query q={"kansai", "japan"}, the documents are ranked in the order $d_2, d_3, d_1$.
## 4. Visualizing the vector space
Finally, let's visualize the feature vectors we obtained. The feature vectors themselves are high-dimensional (8-dimensional in this case), so we project them onto a 2-dimensional space with a dimensionality-reduction technique. Here we use `LSI` (Latent Semantic Indexing) to map the feature vectors into two dimensions. LSI may be covered in the lectures (depending on how far they progress).
```
import matplotlib.pylab as plt
%matplotlib inline
# LSIにより特徴ベクトルを2次元に落とし込む
lsi = gensim.models.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=2)
lsi_corpus = lsi[tfidf_corpus]
lsi_vectors = gensim.matutils.corpus2dense(lsi_corpus, 2).T
print("d1=", lsi_vectors[0])
print("d2=", lsi_vectors[1])
print("d3=", lsi_vectors[2])
query_lsi_corpus = lsi[[tfidf_q]]
query_lsi_vector = gensim.matutils.corpus2dense(query_lsi_corpus, 2).T[0]
print ("q=", query_lsi_vector)
# 散布図にプロットするため,DataFrameに変換
axis_names = ["z1", "z2"]
doc_names = ["d1", "d2", "d3", "q"]
df = pd.DataFrame(np.r_[lsi_vectors, [query_lsi_vector]],
columns=axis_names, index=doc_names) # np.r_ は行列同士の連結
df
# 散布図をプロット
fig, ax = plt.subplots()
df.plot.scatter(x="z1", y="z2", ax=ax)
ax.axvline(x=0, lw=2, color='red') #x軸とy軸に線を引く
ax.axhline(y=0, lw=2, color='red')
ax.grid(True)
for k, v in df.iterrows():
ax.annotate(k, xy=(v[0]+0.05,v[1]+0.05),size=15) #データ点にラベル名を付与
```
Looking at this figure, the query $q$ and document $d_2$ point in almost the same direction (i.e., their cosine similarity is close to 1), while the angle between $q$ and $d_1$ is almost a right angle (i.e., their cosine similarity is close to 0).
----
# Exercise 1: The Vector Space Model
## Required task (1): Retrieval over a given corpus
Choose one or more of the corpora below and implement retrieval based on the vector space model. Show retrieval results for at least three different queries. (A sketch of one possible pipeline is given after this list.)
1. 83 documents about Kyoto sightseeing (h29iro/data/kyoto_results_100.json)
2. A corpus you prepare yourself, containing at least 100 documents (more is fine).
3. Wikipedia ([reference: gensim Tutorial](https://radimrehurek.com/gensim/wiki.html)). Note that building the model takes an extremely long time, so be prepared.
- Showing about 5-10 retrieval results per query is sufficient.
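As a reference point, here is a minimal sketch of one possible pipeline for corpus 1, reusing the gensim steps from sections 1-3. The variable names are illustrative, `docs` is loaded in the next cell, and the queries are assumed to contain words that actually occur in the corpus.
```python
import gensim
from scipy.spatial.distance import cosine

bows = [doc["bow"].split() for doc in docs]          # tokenized words of each document
dic = gensim.corpora.Dictionary(bows)
id_corpus = [dic.doc2bow(b) for b in bows]
tfidf = gensim.models.TfidfModel(id_corpus, normalize=False)
doc_vecs = gensim.matutils.corpus2dense(tfidf[id_corpus], len(dic)).T

def search(query_terms, topk=5):
    q = gensim.matutils.corpus2dense([tfidf[dic.doc2bow(list(query_terms))]], len(dic)).T[0]
    scores = [1.0 - cosine(q, d) for d in doc_vecs]  # cosine similarity per document
    return sorted(enumerate(scores), key=lambda x: -x[1])[:topk]
```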
```
# The corpus for option 1 is stored in JSON format.
import json
with open("../data/kyoto_results_100.json", "r") as f:
docs = json.load(f)
print("Num of docs = ", len(docs))
docs[0]
# The `bow` field stores the words of each document, tokenized by morphological analysis and separated by spaces.
# Use this field to build the feature vectors.
docs[0]["bow"]
```
## Optional task (a): Okapi BM25
Rank the documents from (1) above using Okapi BM25 and compare the results with those of (1). (A rough scoring sketch follows.)
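As a starting point, a rough sketch of Okapi BM25 scoring over tokenized documents might look like the following (k1 and b are the usual free parameters; this is only one common formulation of BM25):
```python
import math
from collections import Counter

def bm25_scores(query_terms, tokenized_docs, k1=1.2, b=0.75):
    N = len(tokenized_docs)
    avgdl = sum(len(d) for d in tokenized_docs) / N
    df = Counter(t for d in tokenized_docs for t in set(d))   # document frequency of each term
    scores = []
    for d in tokenized_docs:
        tf = Counter(d)
        score = 0.0
        for t in query_terms:
            if df[t] == 0:
                continue
            idf = math.log((N - df[t] + 0.5) / (df[t] + 0.5) + 1)  # the "+1" keeps IDF non-negative
            score += idf * tf[t] * (k1 + 1) / (tf[t] + k1 * (1 - b + b * len(d) / avgdl))
        scores.append(score)
    return scores
```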
## Optional task (b): Relevance feedback
Modify the query with relevance feedback and analyze how the retrieval results change. Also, by visualizing the corpus and the queries, analyze geometrically how the modified query is affected by the feature vectors of the relevant and non-relevant documents.
# How to submit
Submit the ipython notebook page (.ipynb file) and an HTML version of it in one of the following ways.
1. Send them to Yamamoto as e-mail attachments.
- Address: tyamamot at dl.kuis.kyoto-u.ac.jp
2. Upload them to your own GitHub or GitHub Gist and send the URL to Yamamoto. In this case you do not need to prepare an HTML version.
3. Any other way that lets Yamamoto actually check the .ipynb file.
# Deadline
- Thursday, November 30, 2017, 23:59
- Individual consultations about the deadline ``will be accepted``.
|
github_jupyter
|
import numpy as np
import gensim
from nltk.corpus import stopwords
import pandas as pd
np.set_printoptions(precision=4)
# 小数点3ケタまで表示
%precision 3
with open("../data/sample.corpus", "r") as f: #sample.corpusの読み込み
text = f.read().strip().split("\n") #sample.corpusのテキストデータを取得し,それを改行で分割
text
raw_corpus = [d.lower().split() for d in text] #文章を小文字に変換して単語に分割する
print("d1=" , raw_corpus[0])
print("d2=" , raw_corpus[1])
print("d3=" , raw_corpus[2])
# stopwords.words("english")に含まれていない単語のみ抽出
corpus = [list(filter(lambda word: word not in stopwords.words("english"), x)) for x in raw_corpus]
print("d1=" , corpus[0])
print("d2=" , corpus[1])
print("d3=" , corpus[2])
dictionary = gensim.corpora.Dictionary(corpus) #コーパスを与えて,単語->IDの辞書を作成する
dictionary.token2id #作成された辞書の中身
id_corpus = [dictionary.doc2bow(document) for document in corpus]
id_corpus
id_corpus[0]
単語ID0の単語が2回出現
tf_vectors = gensim.matutils.corpus2dense(id_corpus, len(dictionary)).T
print("d1=", tf_vectors[0])
print("d2=", tf_vectors[1])
print("d3=", tf_vectors[2])
tfidf_model = gensim.models.TfidfModel(id_corpus, normalize=False) #normalize=Trueにすると,文書長によってtfを正規化する
tfidf_corpus = tfidf_model[id_corpus] #id_corpusをtfidfで重み付けされたものに変換
tfidf_corpus[0]
[(dictionary[x[0]], x[1]) for x in tfidf_corpus[0]]#dictionary[token_id]でアクセスすると実際の単語が返ってくる
doc2 = [(dictionary[x[0]], x[1]) for x in tfidf_corpus[1]]
doc2
import math
2*math.log2(3) #2log3の計算方法
# 各文書のtfidfベクトルを取得
tfidf_vectors = gensim.matutils.corpus2dense(tfidf_corpus, len(dictionary)).T
print ("d1=", tfidf_vectors[0])
print ("d2=", tfidf_vectors[1])
print ("d3=", tfidf_vectors[2])
# コサイン類似度を計算する関数を用意
from scipy.spatial.distance import cosine
def cosine_sim(v1, v2):
#scipyのcosineは類似度ではなく距離関数のため, 1-コサイン距離 とすることで,コサイン類似度に変換する
return 1.0 - cosine(v1, v2)
# 各文書間のコサイン類似度を計算してみる
print ("sim(d1, d2)=", cosine_sim(tfidf_vectors[0], tfidf_vectors[1]))
print ("sim(d2, d3)=", cosine_sim(tfidf_vectors[1], tfidf_vectors[2]))
print ("sim(d1, d3)=", cosine_sim(tfidf_vectors[0], tfidf_vectors[2]))
q = {"kansai", "japan"}
tfidf_q = tfidf_model[dictionary.doc2bow(q)] #クエリをtfidfベクトルに変換
query_vector = gensim.matutils.corpus2dense([tfidf_q], len(dictionary)).T[0] #numpyのベクトルに変換
print ("q=", query_vector)
print([(dictionary[x[0]], x[1]) for x in tfidf_q])
print ("sim(q, d1) = ", cosine_sim(query_vector, tfidf_vectors[0]))
print ("sim(q, d2) = ", cosine_sim(query_vector, tfidf_vectors[1]))
print ("sim(q, d3) = ", cosine_sim(query_vector, tfidf_vectors[2]))
import matplotlib.pylab as plt
%matplotlib inline
# LSIにより特徴ベクトルを2次元に落とし込む
lsi = gensim.models.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=2)
lsi_corpus = lsi[tfidf_corpus]
lsi_vectors = gensim.matutils.corpus2dense(lsi_corpus, 2).T
print("d1=", lsi_vectors[0])
print("d2=", lsi_vectors[1])
print("d3=", lsi_vectors[2])
query_lsi_corpus = lsi[[tfidf_q]]
query_lsi_vector = gensim.matutils.corpus2dense(query_lsi_corpus, 2).T[0]
print ("q=", query_lsi_vector)
# 散布図にプロットするため,DataFrameに変換
axis_names = ["z1", "z2"]
doc_names = ["d1", "d2", "d3", "q"]
df = pd.DataFrame(np.r_[lsi_vectors, [query_lsi_vector]],
columns=axis_names, index=doc_names) # np.r_ は行列同士の連結
df
# 散布図をプロット
fig, ax = plt.subplots()
df.plot.scatter(x="z1", y="z2", ax=ax)
ax.axvline(x=0, lw=2, color='red') #x軸とy軸に線を引く
ax.axhline(y=0, lw=2, color='red')
ax.grid(True)
for k, v in df.iterrows():
ax.annotate(k, xy=(v[0]+0.05,v[1]+0.05),size=15) #データ点にラベル名を付与
# 1.のコーパスはjson形式で保管されている.
import json
with open("../data/kyoto_results_100.json", "r") as f:
docs = json.load(f)
print("Num of docs = ", len(docs))
docs[0]
# `bow` には形態素解析でトークン化された単語列がスペース区切りで保存されている.
# これを使用して特徴ベクトルを作成するとよい.
docs[0]["bow"]
| 0.264168 | 0.97658 |
# Load and Visualize FashionMNIST
---
In this notebook, we load and look at images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist).
The first step in any classification problem is to look at the dataset you are working with. This will give you some details about the format of images and labels, as well as some insight into how you might approach defining a network to recognize patterns in such an image set.
PyTorch has some built-in datasets that you can use, and FashionMNIST is one of them; it has already been downloaded into the `data/` directory in this notebook, so all we have to do is load these images using the FashionMNIST dataset class *and* load the data in batches with a `DataLoader`.
### Load the [data](http://pytorch.org/docs/master/torchvision/datasets.html)
#### Dataset class and Tensors
``torch.utils.data.Dataset`` is an abstract class representing a
dataset. The FashionMNIST class is an extension of this Dataset class, and it allows us to 1. load batches of image/label data, and 2. uniformly apply transformations to our data, such as turning all our images into Tensors for training a neural network. *Tensors are similar to numpy arrays, but can also be used on a GPU to accelerate computing.*
Let's see how to construct a training dataset.
```
# our basic libraries
import torch
import torchvision
# data loading and transforming
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torchvision import transforms
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors for input into a CNN
## Define a transform to read the data in as a tensor
data_transform = transforms.ToTensor()
# choose the training and test datasets
train_data = FashionMNIST(root='./data', train=True,
download=False, transform=data_transform)
# Print out some stats about the training data
print('Train data, number of images: ', len(train_data))
```
#### Data iteration and batching
Next, we'll use ``torch.utils.data.DataLoader`` , which is an iterator that allows us to batch and shuffle the data.
In the next cell, we shuffle the data and load in image/label data in batches of size 20.
```
# prepare data loaders, set the batch_size
## TODO: you can try changing the batch_size to be larger or smaller
## when you get to training your network, see how batch_size affects the loss
batch_size = 20
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
# specify the image classes
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
### Visualize some training data
This cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
    ax = fig.add_subplot(2, batch_size//2, idx+1, xticks=[], yticks=[])  # integer division keeps the subplot count an int
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title(classes[labels[idx]])
```
### View an image in more detail
Each image in this dataset is a `28x28` pixel, normalized, grayscale image.
#### A note on normalization
Normalization ensures that, as we go through a feedforward pass and then a backpropagation step in training our CNN, each image feature will fall within a similar range of values and not overly activate any particular layer in our network. During the feedforward step, a network takes in an input image and multiplies each input pixel by some convolutional filter weights (and adds biases!), then it applies some activation and pooling functions. Without normalization, it's much more likely that the calculated gradients in the backpropagation step will be quite large and cause our loss to increase instead of converge.
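As a quick sanity check (reusing the `images` batch loaded above), the pixel values after `ToTensor()` should already lie in the [0, 1] range:
```python
# min/max over the whole batch; expect values between 0.0 and 1.0
print('pixel range: [{:.3f}, {:.3f}]'.format(images.min(), images.max()))
```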
```
# select an image by index
idx = 2
img = np.squeeze(images[idx])
# display the pixel values in that image
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
```
|
github_jupyter
|
# our basic libraries
import torch
import torchvision
# data loading and transforming
from torchvision.datasets import FashionMNIST
from torch.utils.data import DataLoader
from torchvision import transforms
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors for input into a CNN
## Define a transform to read the data in as a tensor
data_transform = transforms.ToTensor()
# choose the training and test datasets
train_data = FashionMNIST(root='./data', train=True,
download=False, transform=data_transform)
# Print out some stats about the training data
print('Train data, number of images: ', len(train_data))
# prepare data loaders, set the batch_size
## TODO: you can try changing the batch_size to be larger or smaller
## when you get to training your network, see how batch_size affects the loss
batch_size = 20
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
# specify the image classes
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
    ax = fig.add_subplot(2, batch_size//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title(classes[labels[idx]])
# select an image by index
idx = 2
img = np.squeeze(images[idx])
# display the pixel values in that image
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
| 0.550366 | 0.994402 |
## Multi-factor Model
Author: Victor Xiao
The __multi-factor model (MFM)__ is a class of factor models constructed on the basis of __Arbitrage Pricing Theory (APT)__.
The core concept of __APT__ is that the expected return of any security or portfolio is driven by a group of (possibly unknown) systematic factors, while the law of one price holds; that is, assets with the same risk-return profile must trade at the same price (otherwise an arbitrage opportunity arises).
The general formula for APT is as follows (a toy numeric illustration is given after the definitions below):
$$r_i = a_i + \sum^K_{k=1}b_{ik}f_k + \epsilon$$
- where $f_k$ is the kth factor affecting the asset return, reflecting the asset's exposure to the kth source of risk, known as a __risk factor__.
- $b_{ik}$ describes the sensitivity of asset $i$ to factor $k$, known as the __factor loading__ of asset $i$ on factor $k$.
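A toy numeric illustration of this equation for a single asset with $K = 3$ factors (all numbers below are made up purely for illustration):
```python
import numpy as np

a_i  = 0.02                          # asset-specific intercept
b_ik = np.array([1.1, 0.4, -0.3])    # factor loadings of asset i on the 3 risk factors
f_k  = np.array([0.05, 0.01, 0.02])  # realized factor returns
eps  = 0.003                         # idiosyncratic (residual) return

r_i = a_i + b_ik @ f_k + eps         # r_i = a_i + sum_k b_ik * f_k + eps
print(round(r_i, 3))                 # 0.076
```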
While APT sets the foundational framework for understanding the mechanisms that drive asset returns, it does not say what the factors are. After devoting substantial resources to this problem, Barra proposed the __Barra multi-factor model__, the baseline multi-factor model the industry uses today.
This notebook attempts to explore the general process involved in the construction of a multi-factor model.
### 1. Categories of Factors
The first step in constructing a multi-factor model is to choose appropriate factors. Generally speaking, factors can be categorized into three classes:
- factors that reflect external impacts.
- factors that reflect comparative cross-sectional properties.
- internal or statistical factors.
### Barra China Equity Model(CNE5)
The ten style factors of CNE5 comprise a total of 21 descriptors. Below we go through each style factor and provide its definition and expression.
> __Beta__
Components: __Beta__ Beta($\beta$)
Computed as the slope coefficient in the time-series regression of the excess stock return $r_t - r_{ft}$ against the cap-weighted excess return of the estimation universe $R_t$:
$$r_t - r_{ft} = a + \beta R_t + e_t$$
The regression coefficients are estimated over the trailing 252 trading days of returns with a half-life of 63 trading days.
> __Momentum__
Components: __RSTR__ Relative Strength
Computed as the sum of excess log returns over the trailing T = 504 trading days with a lag of $L = 21$ trading days:
$$RSTR = \sum^{T+L}_{t=L}w_t [\ln(1+r_t) - \ln(1+r_{ft})]$$
Where $r_t$ is the stock return on day $t$, $r_{ft}$ is the risk-free return, $w_t$ is an exponential weight with a half-life of 126 trading days.
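A rough sketch of this descriptor on a hypothetical daily return series (ordered most-recent-first; the exact weighting and indexing conventions here are assumptions for illustration, not Barra's official implementation):
```python
import numpy as np

T, L, half_life = 504, 21, 126
rng = np.random.default_rng(0)
r   = rng.normal(0.0005, 0.02, T + L)     # daily stock returns, index 0 = most recent day
r_f = np.full(T + L, 0.0001)              # daily risk-free returns

t = np.arange(L, T + L)                   # skip the most recent L = 21 days
w = 0.5 ** ((t - L) / half_life)          # exponential weights with a 126-day half-life
rstr = np.sum(w * (np.log(1 + r[t]) - np.log(1 + r_f[t])))
print(rstr)
```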
> __Size__
Components: __LNCAP__ Natural log of Market cap
Computed by taking the logarithm of the total market capitalization of the firm.
> __Earnings Yield__
Definition: 0.68 * EPIBS + 0.11 * ETOP + 0.21 * CETOP
Components:
__EPIBS__: Analyst Predicted Earnings-to-Price.
Earning ratios forecasted by analysts.
__ETOP__: Trailing earnings-to-price ratio.
Computed by dividing the trailing 12-month earnings by the current market capitalization. Trailing earnings are defined
```
# Necessary Imports
import numpy as np
import pandas as pd
# Reading the Data into the dataframe
```
|
github_jupyter
|
# Necessary Imports
import numpy as np
import pandas as pd
# Reading the Data into the dataframe
| 0.265976 | 0.915922 |
## 3. Analyze Data
[](https://www.youtube.com/watch?v=5yv_ID4YNTI&list=PLLBUgWXdTBDg1Qgmwt4jKtVn9BWh5-zgy "Python Data Science")
Once data is read into Python, a first step is to analyze the data with summary statistics. This is especially true if the data set is large. Summary statistics include the count, mean, standard deviation, maximum, minimum, and quartile information for the data columns.

### Generate Data
Run the next cell to:
- Generate `n` linearly spaced values betweeen `0` and `n-1` with `np.linspace(start,end,count)`
- Draw random samples from a uniform distribution between 0 and 1 with `np.random.rand(count)`
- Draw random samples from a normal (Gaussian) distribution with `np.random.normal(mean,std,count)`
- Combine `time`, `x`, and `y` with a vertical stack `np.vstack` and transpose `.T` for column oriented data.
- Save CSV text file `03-data.csv` with header `time,x,y`.
```
import numpy as np
np.random.seed(0)
n = 1000
time = np.linspace(0,n-1,n)
x = np.random.rand(n)
y = np.random.normal(1,1,n)
data = np.vstack((time,x,y)).T
np.savetxt('03-data.csv',data,header='time,x,y',delimiter=',',comments='')
```

### Display Data Distributions
The histogram is a preview of how to create graphics so that data can be evaluated visually. [04. Visualize](https://github.com/APMonitor/data_science/blob/master/04.%20Visualize.ipynb) shows how to create plots to analyze data.
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(x,10,label='x')
plt.hist(y,60,label='y',alpha=0.7)
plt.ylabel('Count'); plt.legend()
plt.show()
```

### Data Analysis with `numpy`
The `np.loadtxt` function reads the CSV data file `03-data.csv`. Numpy calculates `size` (dimensions), `mean` (average), `std` (standard deviation), and `median` as summary statistics. If you don't specify the `axis`, `numpy` computes the statistic over all values; use `axis=0` for column statistics and `axis=1` for row statistics.
```
import numpy as np
data = np.loadtxt('03-data.csv',delimiter=',',skiprows=1)
print('Dimension (rows,columns):')
print(np.size(data,0),np.size(data,1))
print('Average:')
print(np.mean(data,axis=0))
print('Standard Deviation:')
print(np.std(data,0))
print('Median:')
print(np.median(data,0))
```

### Analyze data
1. Calculate the mean, standard deviation, and median of `x*y`
2. Calculate the `skew` of `x*y` with the `scipy.stats` [skew function](https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.stats.skew.html). (One possible approach is sketched below.)
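One possible approach, reusing `x` and `y` from the generated data above and `scipy.stats` for the skew:
```python
import numpy as np
from scipy.stats import skew

xy = x*y
print('mean   :', np.mean(xy))
print('stdev  :', np.std(xy))
print('median :', np.median(xy))
print('skew   :', skew(xy))
```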

### Data Analysis with `pandas`
Pandas simplifies the data analysis with the `.describe()` method of the `DataFrame` that is created with `pd.read_csv()`. Note that the data file can either be a local file name or a web address such as
```python
url='https://apmonitor.com/pdc/uploads/Main/tclab_data2.txt'
data = pd.read_csv(url)
data.describe()
```
```
import pandas as pd
data = pd.read_csv('03-data.csv')
data.describe()
```

### TCLab Activity

### Generate Data Set 1
Generate a file from the TCLab data with seconds (`t`), heater levels (`Q1` and `Q2`), and temperatures (`lab.T1` and `lab.T2`). Record data every second for 120 seconds and change the heater levels every 30 seconds to a random number between 0 and 80 with `np.random.randint()`. There is no need to change this program, only run it for 2 minutes to collect the data. If you do not have a TCLab device, read a data file 1 from [an online link](https://apmonitor.com/do/uploads/Main/tclab_dyn_data2.txt).
```
import tclab, time, csv
import pandas as pd
import numpy as np
try:
# connect to TCLab if available
n = 120
with open('03-tclab1.csv',mode='w',newline='') as f:
cw = csv.writer(f)
cw.writerow(['Time','Q1','Q2','T1','T2'])
with tclab.TCLab() as lab:
print('t Q1 Q2 T1 T2')
for t in range(n):
if t%30==0:
Q1 = np.random.randint(0,81)
Q2 = np.random.randint(0,81)
lab.Q1(Q1); lab.Q2(Q2)
cw.writerow([t,Q1,Q2,lab.T1,lab.T2])
if t%5==0:
print(t,Q1,Q2,lab.T1,lab.T2)
time.sleep(1)
file = '03-tclab1.csv'
data1=pd.read_csv(file)
except:
print('No TCLab device found, reading online file')
url = 'http://apmonitor.com/do/uploads/Main/tclab_dyn_data2.txt'
data1=pd.read_csv(url)
```
### Read Data Set 2
Use `requests` to download a sample TCLab data file for the analysis. It is saved as `03-tclab2.csv`.
```
import requests
import os
url = 'http://apmonitor.com/pdc/uploads/Main/tclab_data2.txt'
r = requests.get(url)
with open('03-tclab2.csv', 'wb') as f:
f.write(r.content)
print('File 03-tclab2.csv retrieved to current working directory: ')
print(os.getcwd())
```
### Data Analysis
Read the files `03-tclab1.csv` and `03-tclab2.csv` and display summary statistics for each with `data.describe()`. Use the summary statistics to compare the number of samples and the differences in average and standard deviation values for `T1` and `T2`.
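A minimal sketch of this comparison, assuming `data1` from the generation step above and the freshly downloaded `03-tclab2.csv`:
```python
import pandas as pd

data2 = pd.read_csv('03-tclab2.csv')
print(data1.describe())   # count, mean, std, quartiles for each column
print(data2.describe())
```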
|
github_jupyter
|
import numpy as np
np.random.seed(0)
n = 1000
time = np.linspace(0,n-1,n)
x = np.random.rand(n)
y = np.random.normal(1,1,n)
data = np.vstack((time,x,y)).T
np.savetxt('03-data.csv',data,header='time,x,y',delimiter=',',comments='')
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(x,10,label='x')
plt.hist(y,60,label='y',alpha=0.7)
plt.ylabel('Count'); plt.legend()
plt.show()
import numpy as np
data = np.loadtxt('03-data.csv',delimiter=',',skiprows=1)
print('Dimension (rows,columns):')
print(np.size(data,0),np.size(data,1))
print('Average:')
print(np.mean(data,axis=0))
print('Standard Deviation:')
print(np.std(data,0))
print('Median:')
print(np.median(data,0))
url='https://apmonitor.com/pdc/uploads/Main/tclab_data2.txt'
data = pd.read_csv(url)
data.describe()
import pandas as pd
data = pd.read_csv('03-data.csv')
data.describe()
import tclab, time, csv
import pandas as pd
import numpy as np
try:
# connect to TCLab if available
n = 120
with open('03-tclab1.csv',mode='w',newline='') as f:
cw = csv.writer(f)
cw.writerow(['Time','Q1','Q2','T1','T2'])
with tclab.TCLab() as lab:
print('t Q1 Q2 T1 T2')
for t in range(n):
if t%30==0:
Q1 = np.random.randint(0,81)
Q2 = np.random.randint(0,81)
lab.Q1(Q1); lab.Q2(Q2)
cw.writerow([t,Q1,Q2,lab.T1,lab.T2])
if t%5==0:
print(t,Q1,Q2,lab.T1,lab.T2)
time.sleep(1)
file = '03-tclab1.csv'
data1=pd.read_csv(file)
except:
print('No TCLab device found, reading online file')
url = 'http://apmonitor.com/do/uploads/Main/tclab_dyn_data2.txt'
data1=pd.read_csv(url)
import requests
import os
url = 'http://apmonitor.com/pdc/uploads/Main/tclab_data2.txt'
r = requests.get(url)
with open('03-tclab2.csv', 'wb') as f:
f.write(r.content)
print('File 03-tclab2.csv retrieved to current working directory: ')
print(os.getcwd())
| 0.130092 | 0.974386 |
# Pneumonia detection from chest x-ray images with transfer learning
### Dr. Tirthajyoti Sarkar, Fremont, CA 94536 ([LinkedIn](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/), [Github](https://tirthajyoti.github.io))
For more tutorial-style notebooks on deep learning, **[here is my Github repo](https://github.com/tirthajyoti/Deep-learning-with-Python)**.
For more tutorial-style notebooks on general machine learning, **[here is my Github repo](https://github.com/tirthajyoti/Machine-Learning-with-Python)**.
---
### What is this Notebook about?
In this notebook, we show a standard flow of transfer learning for image classification tasks by constructing a new model by,
- loading pre-trained weights of Inception V3 model (trained on the ImageNet database),
- freezing convolutional and associated layers, and
- adding a fresh global average pooling + dense layers on top.
Then, we train this new model with the X-ray Pneumonia dataset.
## The chest xray image dataset
The dataset can be found here: https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia
According to the World Health Organization (WHO), pneumonia kills about 2 million children under 5 years old every year and is consistently estimated as the single leading cause of childhood mortality, killing more children than HIV/AIDS, malaria, and measles combined.
The WHO reports that nearly all cases (95%) of new-onset childhood clinical pneumonia occur in developing countries, particularly in Southeast Asia and Africa. Bacterial and viral pathogens are the two leading causes of pneumonia but require very different forms of management. Bacterial pneumonia requires urgent referral for immediate antibiotic treatment, while viral pneumonia is treated with supportive care. Therefore, accurate and timely diagnosis is imperative.
One key element of diagnosis is radiographic data, since chest X-rays are routinely obtained as standard of care and can help differentiate between different types of pneumonia. However, rapid radiologic interpretation of images is not always available, particularly in the low-resource settings where childhood pneumonia has the highest incidence and highest rates of mortality. To this end, it is beneficial to investigate the effectiveness of a transfer-learning-based image classification framework in classifying pediatric chest X-rays to detect pneumonia.
Read more about such deep learning based medical diagnosis applications in this paper: **[Identifying Medical Diagnoses and Treatable Diseases by Image-Based Deep Learning](https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5)**
---
```
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import tensorflow as tf
from keras.preprocessing.image import img_to_array, load_img
import cv2
```
### How many examples are there?
```
train_directory = "../Data/chest-xray-pneumonia/train/"
val_directory = "../Data/chest-xray-pneumonia/val/"
test_directory = "../Data/chest-xray-pneumonia/test/"
print('NORMAL training images:', len(os.listdir(train_directory+'/NORMAL/')))
print('PNEUMONIA training images:', len(os.listdir(train_directory+'/PNEUMONIA/')))
print('NORMAL validation images:', len(os.listdir(val_directory+'/NORMAL/')))
print('PNEUMONIA validation images:', len(os.listdir(val_directory+'/PNEUMONIA/')))
print('NORMAL test images:', len(os.listdir(test_directory+'/NORMAL/')))
print('PNEUMONIA test images:', len(os.listdir(test_directory+'/PNEUMONIA/')))
```
### Can you tell the difference between the normal and pathological examples?
```
test_normal_names = os.listdir(test_directory+'/NORMAL/')
test_pneumonia_names = os.listdir(test_directory+'/PNEUMONIA/')
num_test_normal = len(test_normal_names)
num_test_pneumonia = len(test_pneumonia_names)
fig, ax = plt.subplots(1,4,figsize=(16,4))
ax = ax.ravel()
print("Here are 4 normal xray images...\n")
for i in range(4):
img_path = test_directory+'/NORMAL/'+test_normal_names[i]
img = mpimg.imread(img_path)
ax[i].imshow(img,cmap='gray')
ax[i].axis('off')
plt.show()
fig, ax = plt.subplots(1,4,figsize=(16,4))
ax = ax.ravel()
print("Here are 4 pathological (pneumonia) xray images...\n")
for i in range(4):
img_path = test_directory+'/PNEUMONIA/'+test_pneumonia_names[i]
img = mpimg.imread(img_path)
ax[i].imshow(img,cmap='gray')
ax[i].axis('off')
plt.show()
def predict_class(ndarray):
"""
Predicts the class from the output array of a neural net output
"""
return np.argmax(ndarray)
```
## Transfer learning with Inception-V3 model
Inception V3 was the 1st Runner Up for image classification in ILSVRC ([ImageNet Large Scale Visual Recognition Competition](http://www.image-net.org/challenges/LSVRC/)) 2015.
The Inception deep convolutional architecture was introduced as GoogLeNet in ([Szegedy et al. 2015a](https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf)), here named Inception-V1. Later the Inception architecture was refined in various ways, first by the introduction of batch normalization ([Ioffe and Szegedy 2015](http://proceedings.mlr.press/v37/ioffe15.pdf)) (Inception-V2). Later by additional factorization ideas in the third iteration ([Szegedy et al. 2015b](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Szegedy_Rethinking_the_Inception_CVPR_2016_paper.pdf)) which will be referred to as Inception-V3.
Read this article to know more about this model.
https://medium.com/@sh.tsang/review-inception-v3-1st-runner-up-image-classification-in-ilsvrc-2015-17915421f77c
```
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from keras.optimizers import RMSprop,Adam
```
### Create the base pre-trained model
```
base_model = InceptionV3(weights='imagenet', include_top=False)
```
### Add a global spatial average pooling layer
```
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
```
### Add a couple of densely connected layers
```
x = Dense(512, activation='relu')(x)
x = Dense(128, activation='relu')(x)
```
### Add a logistic layer for binary classification (we can still use a softmax)
```
predictions = Dense(2, activation='softmax')(x)
```
### This is the final model
```
model = Model(inputs=base_model.input, outputs=predictions)
```
### Train only the top layers and freeze all convolutional InceptionV3 layers
```
for layer in base_model.layers:
layer.trainable = False
```
### Compile
```
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.001),
metrics=['acc'])
```
### Summary (it's a long one)
- Total params: 22,917,794
- Trainable params: 1,115,010
- Non-trainable params: 21,802,784
So, only 1.1 million parameters (weights) will be trained out of 22.9 million total weights in this model. The rest come from the frozen Inception V3 layers.
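As a quick sanity check of this split, the trainable and frozen weight counts can be recomputed directly from the model. This is a common Keras idiom (using the backend already imported as `K` and the `model` defined above), shown here only as a cross-check:
```python
trainable_count = sum(K.count_params(w) for w in model.trainable_weights)
frozen_count = sum(K.count_params(w) for w in model.non_trainable_weights)
print('Trainable: {:,}   Non-trainable: {:,}'.format(trainable_count, frozen_count))
```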
```
model.summary()
```
### Create an `ImageDataGenerator` object to rescale the images and use the `flow_from_directory` method to yield batches of training images
We create both `train_generator` and `val_generator` objects to train and check validation accuracy while training.
Note, however, that because we have only a modest amount of validation images, we set the `batch_size=1` for `val_generator`
```
batch_size=64
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255)
val_datagen = ImageDataGenerator(rescale=1/255)
# Flow training images in batches using the generator
train_generator = train_datagen.flow_from_directory(train_directory,target_size=(200,200),batch_size=batch_size)
val_generator = val_datagen.flow_from_directory(val_directory,target_size=(200,200),batch_size=1)
train_sample = train_generator.n
val_sample = val_generator.n
```
### Use the `fit_generator` method to utilize the `train_generator` object
Remember to pass on both train and val generators to the fit function.
Note the `steps_per_epoch` argument to `fit_generator`. Since this is a generic [Python generator](https://realpython.com/introduction-to-python-generators/), it never stops and therefore will not know where a particular epoch is ending and the next one is starting. **We have to let it know the steps in a single epoch**. This is, in most cases, the length of the total training sample divided by the batch size.
For `validation_steps` we just pass on the `val_sample` since batch size is 1 for validation set.
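For instance, if the training set held 5,216 images (a number used purely for illustration) with a batch size of 64, the arithmetic works out as follows:
```python
n_images, batch = 5216, 64            # illustrative values, not necessarily the actual dataset size
print(int(n_images / batch) + 1)      # 5216/64 = 81.5 -> 82 steps cover the whole set once per epoch
```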
```
model.fit_generator(generator=train_generator,
steps_per_epoch=int(train_sample/batch_size)+1,
validation_data=val_generator,
validation_steps=val_sample,
epochs=20)
```
### Plot training and validation accuracy over epochs
```
import matplotlib.pyplot as plt
plt.figure(figsize=(8,5))
plt.title("Training and validation accuracy over epochs",fontsize=18)
plt.plot(np.arange(1,21),model.history.history['acc'],c='k',lw=3)
plt.plot(np.arange(1,21),model.history.history['val_acc'],c='blue',lw=3)
plt.legend(['Training acc','Val acc'],fontsize=14)
plt.grid(True)
plt.xlabel("Epochs",fontsize=15)
plt.ylabel("Training/val accuracy",fontsize=15)
plt.xticks(np.arange(1,21,2),fontsize=15)
plt.yticks(fontsize=15)
plt.show()
```
## Check the accuracy on the test images
```
normal_array=[]
for i in range(num_test_normal):
img_path = test_directory+'/NORMAL/'+test_normal_names[i]
img = load_img(img_path)
img = img_to_array(img)
img = img/255.0
img = cv2.resize(img,(200,200))
img = np.expand_dims(img,axis=0)
normal_array.append(predict_class(model.predict(img)))
normal_array = np.array(normal_array)
normal_correct = (normal_array==0).sum()
print("Out of {} normal images the model predicted correctly for {} images".format(num_test_normal,normal_correct))
pneumonia_array=[]
for i in range(num_test_pneumonia):
img_path = test_directory+'/PNEUMONIA/'+test_pneumonia_names[i]
img = load_img(img_path)
img = img_to_array(img)
img = img/255.0
img = cv2.resize(img,(200,200))
img = np.expand_dims(img,axis=0)
pneumonia_array.append(np.argmax(model.predict(img),axis=1))
pneumonia_array = np.array(pneumonia_array)
pneumonia_correct = (pneumonia_array==1).sum()
print("Out of {} pneumonia images the model predicted correctly for {} images".format(num_test_pneumonia,pneumonia_correct))
```
## Summary
We created a transfer-learning model based on the famous Inception V3 and trained it with chest X-ray images for pneumonia detection. While the classifier makes good prediction for true pneumonia images, its prediction is underwhelming for the normal cases i.e. it makes a lot of false positive errors (classifies the image as pneumonia when it is not).
Further hyperparameter tuning may be needed to get good overall performance. Also, we resized the images to 200x200 from their original high-quality resolution, and that may be impacting some of the performance.
The validation error plot looks erratic, and therefore, we may simply have to reduce the learning rate and train for far larger number of epochs.
However, the notebook provides a step-by-step illustration of transfer learning with a pre-trained model.
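As a follow-up to the false-positive issue noted above, here is a minimal, hedged sketch of how one could quantify it, assuming the `normal_array` and `pneumonia_array` predictions computed in the test cells are still in scope (class 0 = NORMAL, class 1 = PNEUMONIA):
```
import numpy as np

# Stack ground truth and predictions for both test folders.
y_true = np.concatenate([np.zeros(len(normal_array)), np.ones(len(pneumonia_array))])
y_pred = np.concatenate([normal_array.ravel(), pneumonia_array.ravel()])

tp = int(((y_true == 1) & (y_pred == 1)).sum())  # pneumonia correctly flagged
tn = int(((y_true == 0) & (y_pred == 0)).sum())  # normal correctly cleared
fp = int(((y_true == 0) & (y_pred == 1)).sum())  # the false positives discussed above
fn = int(((y_true == 1) & (y_pred == 0)).sum())  # missed pneumonia cases

print("Recall (sensitivity):", tp / (tp + fn))
print("Specificity:", tn / (tn + fp))
```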
|
github_jupyter
|
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import tensorflow as tf
from keras.preprocessing.image import img_to_array, load_img
import cv2
train_directory = "../Data/chest-xray-pneumonia/train/"
val_directory = "../Data/chest-xray-pneumonia/val/"
test_directory = "../Data/chest-xray-pneumonia/test/"
print('NORMAL training images:', len(os.listdir(train_directory+'/NORMAL/')))
print('PNEUMONIA training images:', len(os.listdir(train_directory+'/PNEUMONIA/')))
print('NORMAL validation images:', len(os.listdir(val_directory+'/NORMAL/')))
print('PNEUMONIA validation images:', len(os.listdir(val_directory+'/PNEUMONIA/')))
print('NORMAL test images:', len(os.listdir(test_directory+'/NORMAL/')))
print('PNEUMONIA test images:', len(os.listdir(test_directory+'/PNEUMONIA/')))
test_normal_names = os.listdir(test_directory+'/NORMAL/')
test_pneumonia_names = os.listdir(test_directory+'/PNEUMONIA/')
num_test_normal = len(test_normal_names)
num_test_pneumonia = len(test_pneumonia_names)
fig, ax = plt.subplots(1,4,figsize=(16,4))
ax = ax.ravel()
print("Here are 4 normal xray images...\n")
for i in range(4):
img_path = test_directory+'/NORMAL/'+test_normal_names[i]
img = mpimg.imread(img_path)
ax[i].imshow(img,cmap='gray')
ax[i].axis('off')
plt.show()
fig, ax = plt.subplots(1,4,figsize=(16,4))
ax = ax.ravel()
print("Here are 4 pathological (pneumonia) xray images...\n")
for i in range(4):
img_path = test_directory+'/PNEUMONIA/'+test_pneumonia_names[i]
img = mpimg.imread(img_path)
ax[i].imshow(img,cmap='gray')
ax[i].axis('off')
plt.show()
def predict_class(ndarray):
"""
Predicts the class from the output array of a neural net output
"""
return np.argmax(ndarray)
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from keras.optimizers import RMSprop,Adam
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dense(128, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
for layer in base_model.layers:
layer.trainable = False
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.001),
metrics=['acc'])
model.summary()
batch_size=64
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255)
val_datagen = ImageDataGenerator(rescale=1/255)
# Flow training images in batches using the generator
train_generator = train_datagen.flow_from_directory(train_directory,target_size=(200,200),batch_size=batch_size)
val_generator = val_datagen.flow_from_directory(val_directory,target_size=(200,200),batch_size=1)
train_sample = train_generator.n
val_sample = val_generator.n
model.fit_generator(generator=train_generator,
steps_per_epoch=int(train_sample/batch_size)+1,
validation_data=val_generator,
validation_steps=val_sample,
epochs=20)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,5))
plt.title("Training and validation accuracy over epochs",fontsize=18)
plt.plot(np.arange(1,21),model.history.history['acc'],c='k',lw=3)
plt.plot(np.arange(1,21),model.history.history['val_acc'],c='blue',lw=3)
plt.legend(['Training acc','Val acc'],fontsize=14)
plt.grid(True)
plt.xlabel("Epochs",fontsize=15)
plt.ylabel("Training/val accuracy",fontsize=15)
plt.xticks(np.arange(1,21,2),fontsize=15)
plt.yticks(fontsize=15)
plt.show()
normal_array=[]
for i in range(num_test_normal):
img_path = test_directory+'/NORMAL/'+test_normal_names[i]
img = load_img(img_path)
img = img_to_array(img)
img = img/255.0
img = cv2.resize(img,(200,200))
img = np.expand_dims(img,axis=0)
normal_array.append(predict_class(model.predict(img)))
normal_array = np.array(normal_array)
normal_correct = (normal_array==0).sum()
print("Out of {} normal images the model predicted correctly for {} images".format(num_test_normal,normal_correct))
pneumonia_array=[]
for i in range(num_test_pneumonia):
img_path = test_directory+'/PNEUMONIA/'+test_pneumonia_names[i]
img = load_img(img_path)
img = img_to_array(img)
img = img/255.0
img = cv2.resize(img,(200,200))
img = np.expand_dims(img,axis=0)
pneumonia_array.append(np.argmax(model.predict(img),axis=1))
pneumonia_array = np.array(pneumonia_array)
pneumonia_correct = (pneumonia_array==1).sum()
print("Out of {} pneumonia images the model predicted correctly for {} images".format(num_test_pneumonia,pneumonia_correct))
| 0.535098 | 0.978198 |
# Creating a Filter, Edge Detection
### Import resources and display image
```
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import numpy as np
%matplotlib inline
# Read in the image
image = mpimg.imread('images/curved_lane.jpg')
plt.imshow(image)
```
### Convert the image to grayscale
```
# Convert to grayscale for filtering
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray, cmap='gray')
```
### TODO: Create a custom kernel
Below, you've been given one common type of edge detection filter: a Sobel operator.
The Sobel filter is very commonly used in edge detection and in finding patterns in intensity in an image. Applying a Sobel filter to an image is a way of **taking (an approximation of) the derivative of the image** in the x or y direction, separately. The operators look as follows.
<img src="images/sobel_ops.png" width=200 height=200>
**It's up to you to create a Sobel x operator and apply it to the given image.**
For a challenge, see if you can put the image through a series of filters: first one that blurs the image (takes an average of pixels), and then one that detects the edges.
```
# Create a custom kernel
# 3x3 array for edge detection
sobel_y = np.array([[ -1, -2, -1],
[ 0, 0, 0],
[ 1, 2, 1]])
## TODO: Create and apply a Sobel x operator
sobel_x = np.array([[ -1, 0, 1],
                    [ -2, 0, 2],
                    [ -1, 0, 1]])
# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel)
filtered_image = cv2.filter2D(gray, -1, sobel_y)
plt.imshow(filtered_image, cmap='gray')
```
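One possible take on the blur-then-edge-detection challenge mentioned above; this is only a sketch, reusing the grayscale `gray` and the `sobel_y` kernel from the cell above, with an arbitrary 5x5 averaging kernel for the blur:
```
# Blur first (simple 5x5 averaging filter), then detect edges on the blurred image.
avg_kernel = np.ones((5, 5)) / 25.0
blurred = cv2.filter2D(gray, -1, avg_kernel)
blurred_edges = cv2.filter2D(blurred, -1, sobel_y)
plt.imshow(blurred_edges, cmap='gray')
```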
### Test out other filters!
You're encouraged to create other kinds of filters and apply them to see what happens! As an **optional exercise**, try the following:
* Create a filter with decimal value weights.
* Create a 5x5 filter
* Apply your filters to the other images in the `images` directory.
```
filtered_image = cv2.filter2D(gray, -1, sobel_x)
plt.imshow(filtered_image, cmap='gray')
sobel_dec = np.array([[ -0.5, -0.75, -0.5],
[ 0, 0, 0],
[ 0.5, 0.75, 0.5]])
filtered_image = cv2.filter2D(gray, -1, sobel_dec)
plt.imshow(filtered_image, cmap='gray')
sobel_5 = np.array([ [ -0.5, -0.75, -1, -0.75, -0.5],
[ 0, 0, 0, 0, 0],
[ 0.5, 0.75, 1, 0.75, 0.5]])
filtered_image = cv2.filter2D(gray, -1, sobel_5)  # apply the wider 3x5 kernel defined above
plt.imshow(filtered_image, cmap='gray')
```
**Second Image**
```
import os  # needed for os.listdir below
image2 = mpimg.imread('images/'+os.listdir('images')[0])
plt.imshow(image2)
# Convert to grayscale for filtering
gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)
plt.imshow(gray2, cmap='gray')
sobel_5 = np.array([ [ -0.5, -0.75, -1, -0.75, -0.5],
[ 0, 0, 0, 0, 0],
[ 0.5, 0.75, 1, 0.75, 0.5]])
filtered_image2 = cv2.filter2D(gray2, -1, sobel_5)
plt.imshow(filtered_image2, cmap='gray')
```
|
github_jupyter
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import numpy as np
%matplotlib inline
# Read in the image
image = mpimg.imread('images/curved_lane.jpg')
plt.imshow(image)
# Convert to grayscale for filtering
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray, cmap='gray')
# Create a custom kernel
# 3x3 array for edge detection
sobel_y = np.array([[ -1, -2, -1],
[ 0, 0, 0],
[ 1, 2, 1]])
## TODO: Create and apply a Sobel x operator
sobel_x = np.array([[ -1, 0, 1],
                    [ -2, 0, 2],
                    [ -1, 0, 1]])
# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel)
filtered_image = cv2.filter2D(gray, -1, sobel_y)
plt.imshow(filtered_image, cmap='gray')
filtered_image = cv2.filter2D(gray, -1, sobel_x)
plt.imshow(filtered_image, cmap='gray')
sobel_dec = np.array([[ -0.5, -0.75, -0.5],
[ 0, 0, 0],
[ 0.5, 0.75, 0.5]])
filtered_image = cv2.filter2D(gray, -1, sobel_dec)
plt.imshow(filtered_image, cmap='gray')
sobel_5 = np.array([ [ -0.5, -0.75, -1, -0.75, -0.5],
[ 0, 0, 0, 0, 0],
[ 0.5, 0.75, 1, 0.75, 0.5]])
filtered_image = cv2.filter2D(gray, -1, sobel_5)
plt.imshow(filtered_image, cmap='gray')
import os  # needed for os.listdir below
image2 = mpimg.imread('images/'+os.listdir('images')[0])
plt.imshow(image2)
# Convert to grayscale for filtering
gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)
plt.imshow(gray2, cmap='gray')
sobel_5 = np.array([ [ -0.5, -0.75, -1, -0.75, -0.5],
[ 0, 0, 0, 0, 0],
[ 0.5, 0.75, 1, 0.75, 0.5]])
filtered_image2 = cv2.filter2D(gray2, -1, sobel_5)
plt.imshow(filtered_image2, cmap='gray')
| 0.269133 | 0.993069 |
# Origin and explanations:
https://medium.com/@curiousily/credit-card-fraud-detection-using-autoencoders-in-keras-tensorflow-for-hackers-part-vii-20e0c85301bd
The dataset we're going to use can be downloaded from [Kaggle](https://www.kaggle.com/dalpozz/creditcardfraud). It contains data about credit card transactions that occurred during a period of two days, with 492 frauds out of 284,807 transactions.
All variables in the dataset are numerical. The data has been transformed using PCA transformation(s) due to privacy reasons. The two features that haven't been changed are Time and Amount. Time contains the seconds elapsed between each transaction and the first transaction in the dataset.
```
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import seaborn as sns
from pylab import rcParams
from sklearn.model_selection import train_test_split
if int(tf.__version__[0]) > 1:
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from tensorflow.keras import regularizers
else:
from keras.models import Model, load_model
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from keras import regularizers
%matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
```
# Loading the data
```
df = pd.read_csv("data/creditcard.csv")
```
# Exploration
```
df.shape
```
31 columns, 2 of which are Time and Amount. The rest are output from the PCA transformation. Let's check for missing values:
```
df.isnull().values.any()
count_classes = pd.value_counts(df['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction class distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency");
```
We have a highly imbalanced dataset on our hands. Normal transactions overwhelm the fraudulent ones by a large margin. Let's look at the two types of transactions:
```
frauds = df[df.Class == 1]
normal = df[df.Class == 0]
frauds.shape
normal.shape
```
How different are the amounts of money used in the two transaction classes?
```
frauds.Amount.describe()
normal.Amount.describe()
```
Let's have a more graphical representation:
```
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(frauds.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
plt.show();
```
Do fraudulent transactions occur more often at certain times?
```
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Time of transaction vs Amount by class')
ax1.scatter(frauds.Time, frauds.Amount)
ax1.set_title('Fraud')
ax2.scatter(normal.Time, normal.Amount)
ax2.set_title('Normal')
plt.xlabel('Time (in Seconds)')
plt.ylabel('Amount')
plt.show()
```
# Autoencoder:
# Preparing the data
First, let's drop the Time column (we are not going to use it) and apply scikit-learn's StandardScaler to the Amount. The scaler removes the mean and scales the values to unit variance:
```
from sklearn.preprocessing import StandardScaler
data = df.drop(['Time'], axis=1)
data['Amount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
```
Training our Autoencoder is going to be a bit different from what we are used to. Let's say you have a dataset containing a lot of non-fraudulent transactions at hand. You want to detect any anomaly on new transactions. We will create this situation by training our model on the normal transactions only. Reserving the correct class on the test set will give us a way to evaluate the performance of our model. We will reserve 20% of our data for testing:
```
X_train, X_test = train_test_split(data, test_size=0.2, random_state=RANDOM_SEED)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
```
# Building the model
Our Autoencoder uses 4 fully connected layers with 50, 25, 25 and 29 neurons respectively. The first two layers are used for our encoder, the last two go for the decoder. Additionally, L2 activity regularization is applied to the first encoder layer during training:
```
input_dim = X_train.shape[1]
encoding_dim = 50
input_layer = Input(shape=(input_dim, ))
encoder = Dense(encoding_dim, activation="relu",
activity_regularizer=regularizers.l2(10e-5))(input_layer)
encoder = Dense(int(encoding_dim / 2), activation="relu")(encoder)
decoder = Dense(int(encoding_dim / 2), activation='relu')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
```
Let's train our model for up to 100 epochs with a batch size of 32 samples and save the best performing model to a file. The ModelCheckpoint provided by Keras is really handy for such tasks, and the EarlyStopping callback with a patience of 5 epochs stops training once the validation loss stops improving. Additionally, the training progress will be exported in a format that TensorBoard understands.
```
nb_epoch = 100
batch_size = 32
autoencoder.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['accuracy'])
mc_callback = ModelCheckpoint(filepath="model.h5",
verbose=0,
save_best_only=True)
tb_callback = TensorBoard(log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=True)
es_callback = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
verbose=2,
mode='auto',
baseline=None,
restore_best_weights=False)
history = autoencoder.fit(X_train, X_train,
epochs=nb_epoch,
batch_size=batch_size,
shuffle=True,
validation_data=(X_test, X_test),
verbose=1,
callbacks=[mc_callback, tb_callback, es_callback]).history
# autoencoder = load_model('model.h5')
```
# Evaluation
```
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right');
```
The reconstruction error on our training and test data seems to converge nicely. Is it low enough? Let's have a closer look at the error distribution:
```
predictions = autoencoder.predict(X_test)
mse = np.mean(np.power(X_test - predictions, 2), axis=1)
error_df = pd.DataFrame({'reconstruction_error': mse,
'true_class': y_test})
error_df.describe()
```
## Reconstruction error without fraud
```
fig = plt.figure()
ax = fig.add_subplot(111)
normal_error_df = error_df[(error_df['true_class']== 0) & (error_df['reconstruction_error'] < 10)]
_ = ax.hist(normal_error_df.reconstruction_error.values, bins=10)
```
## Reconstruction error with fraud
```
fig = plt.figure()
ax = fig.add_subplot(111)
fraud_error_df = error_df[error_df['true_class'] == 1]
_ = ax.hist(fraud_error_df.reconstruction_error.values, bins=10)
from sklearn.metrics import (confusion_matrix, precision_recall_curve, auc,
roc_curve, recall_score, classification_report, f1_score,
precision_recall_fscore_support)
```
ROC curves are a very useful tool for understanding the performance of binary classifiers. However, our case is a bit out of the ordinary: we have a very imbalanced dataset. Nonetheless, let's have a look at our ROC curve:
```
fpr, tpr, thresholds = roc_curve(error_df.true_class, error_df.reconstruction_error)
roc_auc = auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, label='AUC = %0.4f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.001, 1])
plt.ylim([0, 1.001])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show();
```
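Because the classes are so imbalanced, the area under the precision-recall curve (average precision) is often a more informative single number than ROC AUC. A hedged one-liner using scikit-learn (note that `average_precision_score` is an extra import, not used elsewhere in this notebook):
```
from sklearn.metrics import average_precision_score

# Summarises the precision-recall curve plotted in the next section.
print("Average precision:", average_precision_score(error_df.true_class, error_df.reconstruction_error))
```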
## Precision vs Recall
```
precision, recall, th = precision_recall_curve(error_df.true_class, error_df.reconstruction_error)
plt.plot(recall, precision, 'b', label='Precision-Recall curve')
plt.title('Recall vs Precision')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
plt.plot(th, precision[1:], 'b', label='Threshold-Precision curve')
plt.title('Precision for different threshold values')
plt.xlabel('Threshold')
plt.ylabel('Precision')
plt.show()
plt.plot(th, recall[1:], 'b', label='Threshold-Recall curve')
plt.title('Recall for different threshold values')
plt.xlabel('Reconstruction error')
plt.ylabel('Recall')
plt.show()
```
## Prediction
```
threshold = 2.9
groups = error_df.groupby('true_class')
fig, ax = plt.subplots()
for name, group in groups:
ax.plot(group.index, group.reconstruction_error, marker='o', ms=3.5, linestyle='',
label= "Fraud" if name == 1 else "Normal")
ax.hlines(threshold, ax.get_xlim()[0], ax.get_xlim()[1], colors="r", zorder=100, label='Threshold')
ax.legend()
plt.title("Reconstruction error for different classes")
plt.ylabel("Reconstruction error")
plt.xlabel("Data point index")
plt.show();
y_pred = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]
conf_matrix = confusion_matrix(error_df.true_class, y_pred)
plt.figure(figsize=(12, 12))
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d");
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
```
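The threshold of 2.9 used above was picked by inspecting the plots. As an alternative, here is a hedged sketch of choosing it by maximising F1 over the candidate thresholds returned by `precision_recall_curve` (assuming the `precision`, `recall` and `th` arrays from the earlier cell are still in scope); maximising F1 is just one possible criterion:
```
# precision[1:] and recall[1:] align with the thresholds in th.
f1_scores = 2 * precision[1:] * recall[1:] / (precision[1:] + recall[1:] + 1e-12)
best_idx = np.argmax(f1_scores)
print("Threshold maximising F1:", th[best_idx], " F1:", f1_scores[best_idx])
```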
# References
- [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
- [Stanford tutorial on Autoencoders](http://ufldl.stanford.edu/tutorial/unsupervised/Autoencoders/)
- [Stacked Autoencoders in TensorFlow](http://cmgreen.io/2016/01/04/tensorflow_deep_autoencoder.html)
|
github_jupyter
|
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import seaborn as sns
from pylab import rcParams
from sklearn.model_selection import train_test_split
if int(tf.__version__[0]) > 1:
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from tensorflow.keras import regularizers
else:
from keras.models import Model, load_model
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from keras import regularizers
%matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
df = pd.read_csv("data/creditcard.csv")
df.shape
df.isnull().values.any()
count_classes = pd.value_counts(df['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction class distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency");
frauds = df[df.Class == 1]
normal = df[df.Class == 0]
frauds.shape
normal.shape
frauds.Amount.describe()
normal.Amount.describe()
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(frauds.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
plt.show();
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Time of transaction vs Amount by class')
ax1.scatter(frauds.Time, frauds.Amount)
ax1.set_title('Fraud')
ax2.scatter(normal.Time, normal.Amount)
ax2.set_title('Normal')
plt.xlabel('Time (in Seconds)')
plt.ylabel('Amount')
plt.show()
from sklearn.preprocessing import StandardScaler
data = df.drop(['Time'], axis=1)
data['Amount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
X_train, X_test = train_test_split(data, test_size=0.2, random_state=RANDOM_SEED)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
input_dim = X_train.shape[1]
encoding_dim = 50
input_layer = Input(shape=(input_dim, ))
encoder = Dense(encoding_dim, activation="relu",
activity_regularizer=regularizers.l2(10e-5))(input_layer)
encoder = Dense(int(encoding_dim / 2), activation="relu")(encoder)
decoder = Dense(int(encoding_dim / 2), activation='relu')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
nb_epoch = 100
batch_size = 32
autoencoder.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['accuracy'])
mc_callback = ModelCheckpoint(filepath="model.h5",
verbose=0,
save_best_only=True)
tb_callback = TensorBoard(log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=True)
es_callback = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
verbose=2,
mode='auto',
baseline=None,
restore_best_weights=False)
history = autoencoder.fit(X_train, X_train,
epochs=nb_epoch,
batch_size=batch_size,
shuffle=True,
validation_data=(X_test, X_test),
verbose=1,
callbacks=[mc_callback, tb_callback, es_callback]).history
# autoencoder = load_model('model.h5')
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right');
predictions = autoencoder.predict(X_test)
mse = np.mean(np.power(X_test - predictions, 2), axis=1)
error_df = pd.DataFrame({'reconstruction_error': mse,
'true_class': y_test})
error_df.describe()
fig = plt.figure()
ax = fig.add_subplot(111)
normal_error_df = error_df[(error_df['true_class']== 0) & (error_df['reconstruction_error'] < 10)]
_ = ax.hist(normal_error_df.reconstruction_error.values, bins=10)
fig = plt.figure()
ax = fig.add_subplot(111)
fraud_error_df = error_df[error_df['true_class'] == 1]
_ = ax.hist(fraud_error_df.reconstruction_error.values, bins=10)
from sklearn.metrics import (confusion_matrix, precision_recall_curve, auc,
roc_curve, recall_score, classification_report, f1_score,
precision_recall_fscore_support)
fpr, tpr, thresholds = roc_curve(error_df.true_class, error_df.reconstruction_error)
roc_auc = auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, label='AUC = %0.4f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.001, 1])
plt.ylim([0, 1.001])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show();
precision, recall, th = precision_recall_curve(error_df.true_class, error_df.reconstruction_error)
plt.plot(recall, precision, 'b', label='Precision-Recall curve')
plt.title('Recall vs Precision')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
plt.plot(th, precision[1:], 'b', label='Threshold-Precision curve')
plt.title('Precision for different threshold values')
plt.xlabel('Threshold')
plt.ylabel('Precision')
plt.show()
plt.plot(th, recall[1:], 'b', label='Threshold-Recall curve')
plt.title('Recall for different threshold values')
plt.xlabel('Reconstruction error')
plt.ylabel('Recall')
plt.show()
threshold = 2.9
groups = error_df.groupby('true_class')
fig, ax = plt.subplots()
for name, group in groups:
ax.plot(group.index, group.reconstruction_error, marker='o', ms=3.5, linestyle='',
label= "Fraud" if name == 1 else "Normal")
ax.hlines(threshold, ax.get_xlim()[0], ax.get_xlim()[1], colors="r", zorder=100, label='Threshold')
ax.legend()
plt.title("Reconstruction error for different classes")
plt.ylabel("Reconstruction error")
plt.xlabel("Data point index")
plt.show();
y_pred = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]
conf_matrix = confusion_matrix(error_df.true_class, y_pred)
plt.figure(figsize=(12, 12))
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d");
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
| 0.782247 | 0.980784 |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
%precision 4
%matplotlib inline
```
### Block to load data
```
airfare_demand_data = pd.read_csv(r'D:\Exercises\ML Lab\airq402.dat.txt', sep='\s+',header=None)
airfare_demand_data_label = pd.read_csv(r'D:\Exercises\ML Lab\airq402.txt', sep='\n+',header=None)
wine_quality_red = pd.read_csv(r'D:\Exercises\ML Lab\winequality-red.csv', sep=';')
wine_quality_white = pd.read_csv(r'D:\Exercises\ML Lab\winequality-white.csv', sep=';')
```
### Define functions for the gradient, the least-squares objective, step-length rules and gradient descent
```
np.random.seed(4)  # seed the global RNG so the shuffles below are reproducible
def produce_random_index(size):
indexes = np.arange(0,size,1)
np.random.shuffle(indexes)
return indexes
# returns the gradient of the least squares objective
def gradient_value(x,y,b):
    return np.around(-(np.matmul((2)*(np.transpose(x)),(y-np.matmul(x,b)))), decimals=9)
# returns the least squares objective value (sum of squared residuals)
def function_value(x,y,b):
    return np.around(np.sum((y-np.matmul(x,b))**2), decimals=9)
def bold_step(x,y,b,alpha=1):
alpha_p = 1.1
alpha_m = 0.5
alpha*=alpha_p
d = - gradient_value(x,y,b)
#print((f(x,y,b) - f(np.add(np.matrix(x),np.array(0.1*d.T)),y,b)) < 0)
while ((function_value(x,y,b) - function_value(x,y,np.add(b,np.array(alpha*d.T)))) <= 0).all():
alpha = np.around(alpha*alpha_m , decimals=9)
#print(f"{alpha}")
return alpha
def armijo_step_length(x,y,b,alpha=1, delta=0.5, steep = 0.001):
alpha = 1
d = - gradient_value(x,y,b)
while ((function_value(x,y,b) - function_value(x,y,np.add(np.matrix(b),np.array(alpha*d.T)))) <= (alpha*steep*(d)**2).T).all():
#print(f"{alpha}")
alpha = np.around(alpha*delta, decimals=9)
return alpha
def residual(f_prev,f_curr):
return np.around(np.mean(np.abs(f_prev-f_curr)), decimals= 9)
def rmse(y,y_pred):
return np.around(np.sqrt(np.mean(np.asarray(y-y_pred)**2)), decimals= 9)
def gradient_descent_bold(x, y,x_t,y_t, alpha=0.000001, tol=1e-6, N=1000):
residual_plot_test = {}
residual_plot_train = {}
rmse_plot = {}
beta = np.ones((x.shape[1],1))
#print(f"beta {beta}")
for k in range(N):
y_pred = np.matmul(x,beta)
#print(f"Y {y_pred}")
gradient = (np.matmul(np.transpose(x),(y-y_pred)))
#print(f"gradient {y_pred.shape}")
alpha = bold_step(x,y,beta)
#print(alpha)
beta_new = beta + alpha*(gradient)
#print(f"beta_new {beta_new}")
residual_plot_train[k] = residual(np.asarray(y-np.matmul(x,beta))**2,np.asarray(y-np.matmul(x,beta_new))**2)
residual_plot_test[k] = residual(np.asarray(y_t-np.matmul(x_t,beta))**2,np.asarray(y_t-np.matmul(x_t,beta_new))**2)
rmse_plot[k] = rmse(y_t, np.matmul(x_t,beta_new))
#print(f"The new beta {beta_new}\n")
beta = beta_new
plot(residual_plot_train,residual_plot_test, rmse_plot, "Gradient Descent using Bold")
return beta, residual_plot_train, rmse_plot
def gradient_descent_armijo(x, y,x_t,y_t, alpha=0.000001, tol=1e-6, N=1000):
residual_plot_test = {}
residual_plot_train = {}
rmse_plot = {}
beta = np.ones((x.shape[1],1))
#print(f"beta {beta}")
for k in range(N):
y_pred = np.matmul(x,beta)
#print(f"Y {y_pred}")
gradient = (np.matmul(np.transpose(x),(y-y_pred)))
#print(f"gradient {y_pred.shape}")
alpha = armijo_step_length(x,y,beta)
beta_new = beta + alpha*(gradient)
#print(f"beta_new {beta_new}")
residual_plot_train[k] = residual((y-np.matmul(x,beta))**2,(y-np.matmul(x,beta_new))**2)
residual_plot_test[k] = residual((y_t-np.matmul(x_t,beta))**2,(y_t-np.matmul(x_t,beta_new))**2)
rmse_plot[k] = rmse(y_t, np.matmul(x_t,beta_new))
#print(f"The new beta {beta_new}\n")
beta = beta_new
plot(residual_plot_train,residual_plot_test, rmse_plot, "Gradient Descent using Arimijo")
return beta, residual_plot_train, rmse_plot
def gradient_descent(x,y,x_t,y_t, delta=0.5,alpha=0.00001, tol=1e-6, N=10):
residual_plot_test = {}
residual_plot_train = {}
rmse_plot = {}
beta = np.ones((x.shape[1],1))
#print(f"beta {beta}")
for k in range(N):
y_pred = np.matmul(x,beta)
#print(f"Y {y_pred}")
gradient = (np.matmul(np.transpose(x),(y-y_pred)))
#print(f"gradient {y_pred.shape}")
beta_new = beta + alpha*(gradient)
#print(f"beta_new {beta_new}")
residual_plot_train[k] = residual((y-np.matmul(x,beta))**2,(y-np.matmul(x,beta_new))**2)
residual_plot_test[k] = residual((y_t-np.matmul(x_t,beta))**2,(y_t-np.matmul(x_t,beta_new))**2)
rmse_plot[k] = rmse(y_t, np.matmul(x_t,beta_new))
#print(f"The new beta {beta_new}\n")
beta = beta_new
plot(residual_plot_train,residual_plot_test, rmse_plot, "Gradient Descent with learning rate "+str(alpha))
return beta, residual_plot_train, rmse_plot
def plot(residual_plot_train,residual_plot_test, rmse_plot,title):
fig, (ax1,ax2) = plt.subplots(1, 2, sharex=True,figsize=(16, 10))
ax1.plot(list(residual_plot_train.keys()),list(residual_plot_train.values()),'-r')
ax1.plot(list(residual_plot_test.keys()),list(residual_plot_test.values()),'-g')
ax2.plot(list(rmse_plot.keys()),list(rmse_plot.values()),'-r')
ax1.legend(['Residual vs Iteration Training','Residual vs Iteration Test'])
ax2.legend(['MSE vs Iteration'])
ax1.set_xlabel('Iteration')
ax1.set_ylabel('Residual')
ax2.set_xlabel('Iteration')
ax2.set_ylabel('RMSE')
ax1.grid()
ax2.grid()
ax1.set_title(title)
ax2.set_title(title)
```
### A common strategy used throughout this exercise was forward variable selection, a greedy approach that selects a feature by comparing the error with the feature in the model against the error without it; a sketch follows below.
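A minimal sketch of that greedy forward-selection loop, assuming a feature DataFrame `X`, a target column vector `y`, and the `rmse` helper defined above; the plain least-squares solve via `np.linalg.lstsq` is used here purely for illustration:
```
def forward_selection(X, y, n_features):
    """Greedily add the feature that lowers the training RMSE the most at each step."""
    selected, remaining = [], list(X.columns)
    for _ in range(n_features):
        best_feature, best_score = None, np.inf
        for feature in remaining:
            cols = selected + [feature]
            beta, *_ = np.linalg.lstsq(X[cols].values, y, rcond=None)
            score = rmse(y, X[cols].values @ beta)
            if score < best_score:
                best_feature, best_score = feature, score
        selected.append(best_feature)
        remaining.remove(best_feature)
    return selected
```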
## Performing backtracking line search for the learning rate using Armijo and bold-step, followed by gradient descent with fixed learning rates, on the AIRFARE DATA
```
'''
labels = []
for row in airfare_demand_data_label[5:][0]:
labels.append(row.split(' ')[0])
airfare_demand_data.columns=labels
'''
lab = ['City1','City2','Average Fare','Distance','Average weekly passengers','market leading airline','market share leading','Average fare leading','Low price airline','market share low price','price']
airfare_demand_data.rename(index=str, columns=dict((el,l) for el,l in zip(range(0,11),lab)), inplace=True)
airfare_demand_data.head()
non_num = airfare_demand_data.select_dtypes(exclude=np.number).columns
airfare_demand_data = pd.concat([airfare_demand_data.drop(non_num, axis=1), pd.get_dummies(airfare_demand_data[non_num])], axis=1)
airfare_demand_data.head()
```
### - We could use an iterator to label the columns of the data, but since we have repeated column names, a better approach is selective labelling.
### - We use the select_dtypes method to select columns with non-numeric datatypes and pandas get_dummies to create one-hot encodings.
### - We inspect the head of the dataframe to highlight the one-hot encoded columns.
```
airfare_demand_data.isna().any().tolist().count(True) #highlights the column having any NaN values
```
### Plotting a correlation matrix using the pandas corr() function and the seaborn library
```
f, ax = plt.subplots(figsize=(26, 10))
corr = airfare_demand_data.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
```
#### As can be seen from the correlation plot, there is a very high correlation between Average Fare, Average fare leading, Distance and price. There is also a very high correlation between Average Fare and Average fare leading, so we can drop one of those columns. I also chose Average weekly passengers, since the price is likely to be influenced by the number of passengers.
### I selected two features from the dataset: Distance and Average weekly passengers.
```
airfare_demand_data = airfare_demand_data[['Distance','Average weekly passengers', 'price']]
airfare_demand_data.head()
```
### Preprocessing and train/ test split
```
#normalize the data
airfare_demand_data = (airfare_demand_data-airfare_demand_data.min())/(airfare_demand_data.max()-airfare_demand_data.min())
#add bias column at index 0
airfare_demand_data.insert(0,'Bias',np.ones(1000))
#creating the Price column as predictor column
airfare_price = airfare_demand_data['price']
airfare_demand_data.drop('price',inplace=True, axis = 1)
#shuffle the dataset
index_matrices = produce_random_index(len(airfare_demand_data))
#split the dataset
X_train_airfare = airfare_demand_data.iloc[index_matrices[:800]]
X_test_airfare = airfare_demand_data.iloc[index_matrices[800:]]
Y_train_airfare = airfare_price.iloc[index_matrices[:800]].values.reshape(800,1)
Y_test_airfare = airfare_price.iloc[index_matrices[800:]].values.reshape(200,1)
#perform the predictions
m3,res_plot,rmse_plot = gradient_descent(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, alpha=0.00001, N=500)
m4,res_plot,rmse_plot = gradient_descent(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, alpha=0.000001, N=500)
m5,res_plot,rmse_plot = gradient_descent(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, alpha=0.0000001, N=500)
m1,res_plot,rmse_plot = gradient_descent_armijo(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, N= 10)
m2,res_plot,rmse_plot = gradient_descent_bold(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, N= 10)
print(f"MSE using GD LR 0.00001 in 500 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m3)))}")
print(f"MSE using GD LR 0.000001 in 500 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m4)))}")
print(f"MSE using GD LR 0.0000001 in 500 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m5)))}")
print(f"MSE using Arimijo in 10 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m1)))}")
print(f"MSE using BOLD in 10 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m2)))}")
```
### Gradient Descent
#### When the learning rate is decreased, the RMSE vs. iteration curve on the test set becomes smoother, indicating very slow convergence towards the optimal solution. Hence the learning rate of 0.00001 produces the best normalised error score, and the error grows further as the learning rate is decreased.
#### A similar trend is seen in the residuals: as the learning rate is decreased, the model's performance on the training set starts to separate from its performance on the test set. This is because the model is still far from optimal due to the low learning rate, and therefore performs slightly better on the training set than on the test set.
### Armijo and Boldstep
#### Both Armijo and bold-step are backtracking line-search methods that shrink the learning rate until a particular condition is satisfied; this essentially requires that each step achieves a sufficient decrease of the objective (the conditions are written out below).
#### We can observe that both Armijo and bold-step take very few iterations to converge to the optimal solution thanks to the variable learning rate; for Armijo we used a steepness parameter of 0.001.
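For reference, the two acceptance tests roughly correspond to the following standard conditions, with step length $\alpha$, descent direction $d = -\nabla f(\beta)$, shrink factor $\delta = 0.5$ and steepness $c = 0.001$ (the code above checks an elementwise variant of the same inequality):
$$f(\beta) - f(\beta + \alpha d) \;\ge\; c\,\alpha\,\lVert d \rVert^2 \qquad \text{(Armijo: shrink } \alpha \text{ by } \delta \text{ until this holds)}$$
$$f(\beta + \alpha d) \;<\; f(\beta) \qquad \text{(bold step: first multiply } \alpha \text{ by } 1.1\text{, then halve it until this holds)}$$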
## Thus we can observe that using a backtracking line search to select the learning rate in gradient descent yields better performance, but it requires considerably more computation because the objective and gradient are evaluated repeatedly while checking the backtracking condition
## Performing backtracking line search for the learning rate using Armijo and bold-step, followed by gradient descent with fixed learning rates, on the REDWINE DATA
```
non_num_wine_quality_red = wine_quality_red.select_dtypes(exclude=np.number).columns #get the non numeric columns
len(non_num_wine_quality_red) #returned 0
#wine_quality_red= pd.get_dummies(wine_quality_red[non_num_wine_quality_red]) #since there are no non numeric columns no dummies
wine_quality_red.isna().any().tolist().count(True) #count any undefined values
wine_quality_red = pd.read_csv(r'D:\Exercises\ML Lab\winequality-red.csv', sep=';')
f, ax = plt.subplots(figsize=(26, 10))
corr = wine_quality_red.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
```
#### As can be seen from the correlation plot, there is a very high correlation between alcohol, sulphates, citric acid and quality. There is also a very high correlation between citric acid and sulphates, so we can drop one of those columns. Thus, I selected the Alcohol and Sulphates features to predict Quality.
```
wine_quality_red = wine_quality_red[['alcohol','sulphates','quality']]
#normalize the data
wine_quality_red = (wine_quality_red-wine_quality_red.min())/(wine_quality_red.max()-wine_quality_red.min())
#add bias column at index 0
wine_quality_red.insert(0,'Bias',np.ones(1599))
#create income variable
wine_quality_r = wine_quality_red['quality']
wine_quality_red.drop('quality',inplace=True, axis = 1)
#shuffle the dataset
index_matrices = produce_random_index(len(wine_quality_red))
#split the dataset
X_train_red = wine_quality_red.iloc[index_matrices[:1279]]
X_test_red = wine_quality_red.iloc[index_matrices[1279:]]
Y_train_red = wine_quality_r.iloc[index_matrices[:1279]].values.reshape(1279,1)
Y_test_red = wine_quality_r.iloc[index_matrices[1279:]].values.reshape(320,1)
m3,res_plot,rmse_plot = gradient_descent(X_train_red, Y_train_red,X_test_red, Y_test_red, alpha=0.00001, N=500)
m4,res_plot,rmse_plot = gradient_descent(X_train_red, Y_train_red,X_test_red, Y_test_red, alpha=0.000001, N=500)
m5,res_plot,rmse_plot = gradient_descent(X_train_red, Y_train_red,X_test_red, Y_test_red, alpha=0.0000001, N=500)
m1,res_plot,rmse_plot = gradient_descent_armijo(X_train_red, Y_train_red,X_test_red, Y_test_red, N= 10)
m2,res_plot,rmse_plot = gradient_descent_bold(X_train_red, Y_train_red,X_test_red, Y_test_red, N=10)
print(f"MSE using GD LR 0.00001 in 500 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m3)))}")
print(f"MSE using GD LR 0.000001 in 500 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m4)))}")
print(f"MSE using GD LR 0.0000001 in 500 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m5)))}")
print(f"MSE using Arimijo in 10 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m1)))}")
print(f"MSE using BOLD in 10 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m2)))}")
```
### Gradient Descent
#### When the learning rate is decreased, the RMSE vs. iteration curve on the test set becomes smoother, indicating very slow convergence towards the optimal solution. Hence the learning rate of 0.00001 produces the best normalised error score, and the error grows further as the learning rate is decreased.
#### A similar trend is seen in the residuals: as the learning rate is decreased, the model's performance on the training set starts to separate from its performance on the test set. This is because the model is still far from optimal due to the low learning rate, and therefore performs slightly better on the training set than on the test set.
### Armijo and Boldstep
#### Both Armijo and bold-step are backtracking line-search methods that shrink the learning rate until a particular condition is satisfied; this essentially requires that each step achieves a sufficient decrease of the objective, as written out in the previous section.
#### We can observe that both Armijo and bold-step take very few iterations to converge to the optimal solution thanks to the variable learning rate; for Armijo we used a steepness parameter of 0.001.
## Thus we can observe that using a backtracking line search to select the learning rate in gradient descent yields better performance, but it requires considerably more computation because the objective and gradient are evaluated repeatedly while checking the backtracking condition.
## Performing backtracking line search for the learning rate using Armijo and bold-step, followed by gradient descent with fixed learning rates, on the WHITE WINE DATA
```
non_num_wine_quality_white = wine_quality_white.select_dtypes(exclude=np.number).columns #get the non numeric columns
len(non_num_wine_quality_white) #returned 0
#wine_quality_red= pd.get_dummies(wine_quality_red[non_num_wine_quality_red]) #since there are no non numeric columns no dummies
wine_quality_white.isna().any().tolist().count(True) #count any undefined values
wine_quality_white = pd.read_csv(r'D:\Exercises\ML Lab\winequality-white.csv', sep=';')
f, ax = plt.subplots(figsize=(26, 10))
corr = wine_quality_white.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
```
#### As can be seen from the correlation plot, there is a very high correlation between alcohol and quality. I also selected the pH feature, since the performance after adding pH as a single feature was better than after adding any other feature in the first step of the forward variable selection process.
```
wine_quality_white = wine_quality_white[['alcohol','pH','quality']]
#normalize the data
wine_quality_white = (wine_quality_white-wine_quality_white.min())/(wine_quality_white.max()-wine_quality_white.min())
#add bias column at index 0
wine_quality_white.insert(0,'Bias',np.ones(4898))
#create income variable
wine_quality = wine_quality_white['quality']
wine_quality_white.drop('quality',inplace=True, axis = 1)
#shuffle the dataset
index_matrices = produce_random_index(len(wine_quality_white))
#split the dataset
X_train_white = wine_quality_white.iloc[index_matrices[:2000]]
X_test_white = wine_quality_white.iloc[index_matrices[2000:3000]]
Y_train_white = wine_quality.iloc[index_matrices[:2000]].values.reshape(2000,1)
Y_test_white = wine_quality.iloc[index_matrices[2000:3000]].values.reshape(1000,1)
m3,res_plot,rmse_plot = gradient_descent(X_train_white, Y_train_white,X_test_white, Y_test_white, alpha=0.00001, N=500)
m4,res_plot,rmse_plot = gradient_descent(X_train_white, Y_train_white,X_test_white, Y_test_white, alpha=0.000001, N=500)
m5,res_plot,rmse_plot = gradient_descent(X_train_white, Y_train_white,X_test_white, Y_test_white, alpha=0.0000001, N=500)
m1,res_plot,rmse_plot = gradient_descent_armijo(X_train_white, Y_train_white,X_test_white, Y_test_white,N=10)
m2,res_plot,rmse_plot = gradient_descent_bold(X_train_white, Y_train_white,X_test_white, Y_test_white,N=10)
print(f"MSE using GD LR 0.00001 in 500 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m3)))}")
print(f"MSE using GD LR 0.000001 in 500 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m4)))}")
print(f"MSE using GD LR 0.0000001 in 500 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m5)))}")
print(f"MSE using Arimijo in 10 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m1)))}")
print(f"MSE using BOLD in 10 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m2)))}")
```
### Gradient Descent
#### When the learning rate is decreased, the RMSE vs. iteration curve on the test set becomes smoother, indicating very slow convergence towards the optimal solution. Hence the learning rate of 0.00001 produces the best normalised error score, and the error grows further as the learning rate is decreased.
#### A similar trend is seen in the residuals: as the learning rate is decreased, the model's performance on the training set starts to separate from its performance on the test set. This is because the model is still far from optimal due to the low learning rate, and therefore performs slightly better on the training set than on the test set.
### Armijo and Boldstep
#### Both Armijo and bold-step are backtracking line-search methods that shrink the learning rate until a particular condition is satisfied; this essentially requires that each step achieves a sufficient decrease of the objective, as written out in the earlier section.
#### We can observe that both Armijo and bold-step take very few iterations to converge to the optimal solution thanks to the variable learning rate; for Armijo we used a steepness parameter of 0.001.
## Thus we can observe that using a backtracking line search to select the learning rate in gradient descent yields better performance, but it requires considerably more computation because the objective and gradient are evaluated repeatedly while checking the backtracking condition.
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
%precision 4
%matplotlib inline
airfare_demand_data = pd.read_csv(r'D:\Exercises\ML Lab\airq402.dat.txt', sep='\s+',header=None)
airfare_demand_data_label = pd.read_csv(r'D:\Exercises\ML Lab\airq402.txt', sep='\n+',header=None)
wine_quality_red = pd.read_csv(r'D:\Exercises\ML Lab\winequality-red.csv', sep=';')
wine_quality_white = pd.read_csv(r'D:\Exercises\ML Lab\winequality-white.csv', sep=';')
np.random.seed(4)  # seed the global RNG so the shuffles below are reproducible
def produce_random_index(size):
indexes = np.arange(0,size,1)
np.random.shuffle(indexes)
return indexes
# returns the gradient of the least squares objective
def gradient_value(x,y,b):
    return np.around(-(np.matmul((2)*(np.transpose(x)),(y-np.matmul(x,b)))), decimals=9)
# returns the least squares objective value (sum of squared residuals)
def function_value(x,y,b):
    return np.around(np.sum((y-np.matmul(x,b))**2), decimals=9)
def bold_step(x,y,b,alpha=1):
alpha_p = 1.1
alpha_m = 0.5
alpha*=alpha_p
d = - gradient_value(x,y,b)
#print((f(x,y,b) - f(np.add(np.matrix(x),np.array(0.1*d.T)),y,b)) < 0)
while ((function_value(x,y,b) - function_value(x,y,np.add(b,np.array(alpha*d.T)))) <= 0).all():
alpha = np.around(alpha*alpha_m , decimals=9)
#print(f"{alpha}")
return alpha
def armijo_step_length(x,y,b,alpha=1, delta=0.5, steep = 0.001):
alpha = 1
d = - gradient_value(x,y,b)
while ((function_value(x,y,b) - function_value(x,y,np.add(np.matrix(b),np.array(alpha*d.T)))) <= (alpha*steep*(d)**2).T).all():
#print(f"{alpha}")
alpha = np.around(alpha*delta, decimals=9)
return alpha
def residual(f_prev,f_curr):
return np.around(np.mean(np.abs(f_prev-f_curr)), decimals= 9)
def rmse(y,y_pred):
return np.around(np.sqrt(np.mean(np.asarray(y-y_pred)**2)), decimals= 9)
def gradient_descent_bold(x, y,x_t,y_t, alpha=0.000001, tol=1e-6, N=1000):
residual_plot_test = {}
residual_plot_train = {}
rmse_plot = {}
beta = np.ones((x.shape[1],1))
#print(f"beta {beta}")
for k in range(N):
y_pred = np.matmul(x,beta)
#print(f"Y {y_pred}")
gradient = (np.matmul(np.transpose(x),(y-y_pred)))
#print(f"gradient {y_pred.shape}")
alpha = bold_step(x,y,beta)
#print(alpha)
beta_new = beta + alpha*(gradient)
#print(f"beta_new {beta_new}")
residual_plot_train[k] = residual(np.asarray(y-np.matmul(x,beta))**2,np.asarray(y-np.matmul(x,beta_new))**2)
residual_plot_test[k] = residual(np.asarray(y_t-np.matmul(x_t,beta))**2,np.asarray(y_t-np.matmul(x_t,beta_new))**2)
rmse_plot[k] = rmse(y_t, np.matmul(x_t,beta_new))
#print(f"The new beta {beta_new}\n")
beta = beta_new
plot(residual_plot_train,residual_plot_test, rmse_plot, "Gradient Descent using Bold")
return beta, residual_plot_train, rmse_plot
def gradient_descent_armijo(x, y,x_t,y_t, alpha=0.000001, tol=1e-6, N=1000):
residual_plot_test = {}
residual_plot_train = {}
rmse_plot = {}
beta = np.ones((x.shape[1],1))
#print(f"beta {beta}")
for k in range(N):
y_pred = np.matmul(x,beta)
#print(f"Y {y_pred}")
gradient = (np.matmul(np.transpose(x),(y-y_pred)))
#print(f"gradient {y_pred.shape}")
alpha = armijo_step_length(x,y,beta)
beta_new = beta + alpha*(gradient)
#print(f"beta_new {beta_new}")
residual_plot_train[k] = residual((y-np.matmul(x,beta))**2,(y-np.matmul(x,beta_new))**2)
residual_plot_test[k] = residual((y_t-np.matmul(x_t,beta))**2,(y_t-np.matmul(x_t,beta_new))**2)
rmse_plot[k] = rmse(y_t, np.matmul(x_t,beta_new))
#print(f"The new beta {beta_new}\n")
beta = beta_new
plot(residual_plot_train,residual_plot_test, rmse_plot, "Gradient Descent using Arimijo")
return beta, residual_plot_train, rmse_plot
def gradient_descent(x,y,x_t,y_t, delta=0.5,alpha=0.00001, tol=1e-6, N=10):
residual_plot_test = {}
residual_plot_train = {}
rmse_plot = {}
beta = np.ones((x.shape[1],1))
#print(f"beta {beta}")
for k in range(N):
y_pred = np.matmul(x,beta)
#print(f"Y {y_pred}")
gradient = (np.matmul(np.transpose(x),(y-y_pred)))
#print(f"gradient {y_pred.shape}")
beta_new = beta + alpha*(gradient)
#print(f"beta_new {beta_new}")
residual_plot_train[k] = residual((y-np.matmul(x,beta))**2,(y-np.matmul(x,beta_new))**2)
residual_plot_test[k] = residual((y_t-np.matmul(x_t,beta))**2,(y_t-np.matmul(x_t,beta_new))**2)
rmse_plot[k] = rmse(y_t, np.matmul(x_t,beta_new))
#print(f"The new beta {beta_new}\n")
beta = beta_new
plot(residual_plot_train,residual_plot_test, rmse_plot, "Gradient Descent with learning rate "+str(alpha))
return beta, residual_plot_train, rmse_plot
def plot(residual_plot_train,residual_plot_test, rmse_plot,title):
fig, (ax1,ax2) = plt.subplots(1, 2, sharex=True,figsize=(16, 10))
ax1.plot(list(residual_plot_train.keys()),list(residual_plot_train.values()),'-r')
ax1.plot(list(residual_plot_test.keys()),list(residual_plot_test.values()),'-g')
ax2.plot(list(rmse_plot.keys()),list(rmse_plot.values()),'-r')
ax1.legend(['Residual vs Iteration Training','Residual vs Iteration Test'])
ax2.legend(['MSE vs Iteration'])
ax1.set_xlabel('Iteration')
ax1.set_ylabel('Residual')
ax2.set_xlabel('Iteration')
ax2.set_ylabel('RMSE')
ax1.grid()
ax2.grid()
ax1.set_title(title)
ax2.set_title(title)
'''
labels = []
for row in airfare_demand_data_label[5:][0]:
labels.append(row.split(' ')[0])
airfare_demand_data.columns=labels
'''
lab = ['City1','City2','Average Fare','Distance','Average weekly passengers','market leading airline','market share leading','Average fare leading','Low price airline','market share low price','price']
airfare_demand_data.rename(index=str, columns=dict((el,l) for el,l in zip(range(0,11),lab)), inplace=True)
airfare_demand_data.head()
non_num = airfare_demand_data.select_dtypes(exclude=np.number).columns
airfare_demand_data = pd.concat([airfare_demand_data.drop(non_num, axis=1), pd.get_dummies(airfare_demand_data[non_num])], axis=1)
airfare_demand_data.head()
airfare_demand_data.isna().any().tolist().count(True) #highlights the column having any NaN values
f, ax = plt.subplots(figsize=(26, 10))
corr = airfare_demand_data.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
airfare_demand_data = airfare_demand_data[['Distance','Average weekly passengers', 'price']]
airfare_demand_data.head()
#normalize the data
airfare_demand_data = (airfare_demand_data-airfare_demand_data.min())/(airfare_demand_data.max()-airfare_demand_data.min())
#add bias column at index 0
airfare_demand_data.insert(0,'Bias',np.ones(1000))
#creating the Price column as predictor column
airfare_price = airfare_demand_data['price']
airfare_demand_data.drop('price',inplace=True, axis = 1)
#shuffle the dataset
index_matrices = produce_random_index(len(airfare_demand_data))
#split the dataset
X_train_airfare = airfare_demand_data.iloc[index_matrices[:800]]
X_test_airfare = airfare_demand_data.iloc[index_matrices[800:]]
Y_train_airfare = airfare_price.iloc[index_matrices[:800]].values.reshape(800,1)
Y_test_airfare = airfare_price.iloc[index_matrices[800:]].values.reshape(200,1)
#perform the predictions
m3,res_plot,rmse_plot = gradient_descent(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, alpha=0.00001, N=500)
m4,res_plot,rmse_plot = gradient_descent(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, alpha=0.000001, N=500)
m5,res_plot,rmse_plot = gradient_descent(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, alpha=0.0000001, N=500)
m1,res_plot,rmse_plot = gradient_descent_armijo(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, N= 10)
m2,res_plot,rmse_plot = gradient_descent_bold(X_train_airfare, Y_train_airfare,X_test_airfare, Y_test_airfare, N= 10)
print(f"MSE using GD LR 0.00001 in 500 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m3)))}")
print(f"MSE using GD LR 0.000001 in 500 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m4)))}")
print(f"MSE using GD LR 0.0000001 in 500 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m5)))}")
print(f"MSE using Arimijo in 10 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m1)))}")
print(f"MSE using BOLD in 10 iterations {rmse(Y_test_airfare,(np.matmul(X_test_airfare,m2)))}")
non_num_wine_quality_red = wine_quality_red.select_dtypes(exclude=np.number).columns #get the non numeric columns
len(non_num_wine_quality_red) #returned 0
#wine_quality_red= pd.get_dummies(wine_quality_red[non_num_wine_quality_red]) #since there are no non numeric columns no dummies
wine_quality_red.isna().any().tolist().count(True) #count any undefined values
wine_quality_red = pd.read_csv(r'D:\Exercises\ML Lab\winequality-red.csv', sep=';')
f, ax = plt.subplots(figsize=(26, 10))
corr = wine_quality_red.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
wine_quality_red = wine_quality_red[['alcohol','sulphates','quality']]
#normalize the data
wine_quality_red = (wine_quality_red-wine_quality_red.min())/(wine_quality_red.max()-wine_quality_red.min())
#add bias column at index 0
wine_quality_red.insert(0,'Bias',np.ones(1599))
#create income variable
wine_quality_r = wine_quality_red['quality']
wine_quality_red.drop('quality',inplace=True, axis = 1)
#shuffle the dataset
index_matrices = produce_random_index(len(wine_quality_red))
#split the dataset
X_train_red = wine_quality_red.iloc[index_matrices[:1279]]
X_test_red = wine_quality_red.iloc[index_matrices[1279:]]
Y_train_red = wine_quality_r.iloc[index_matrices[:1279]].values.reshape(1279,1)
Y_test_red = wine_quality_r.iloc[index_matrices[1279:]].values.reshape(320,1)
m3,res_plot,rmse_plot = gradient_descent(X_train_red, Y_train_red,X_test_red, Y_test_red, alpha=0.00001, N=500)
m4,res_plot,rmse_plot = gradient_descent(X_train_red, Y_train_red,X_test_red, Y_test_red, alpha=0.000001, N=500)
m5,res_plot,rmse_plot = gradient_descent(X_train_red, Y_train_red,X_test_red, Y_test_red, alpha=0.0000001, N=500)
m1,res_plot,rmse_plot = gradient_descent_armijo(X_train_red, Y_train_red,X_test_red, Y_test_red, N= 10)
m2,res_plot,rmse_plot = gradient_descent_bold(X_train_red, Y_train_red,X_test_red, Y_test_red, N=10)
print(f"MSE using GD LR 0.00001 in 500 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m3)))}")
print(f"MSE using GD LR 0.000001 in 500 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m4)))}")
print(f"MSE using GD LR 0.0000001 in 500 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m5)))}")
print(f"MSE using Arimijo in 10 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m1)))}")
print(f"MSE using BOLD in 10 iterations {rmse(Y_test_red,(np.matmul(X_test_red,m2)))}")
non_num_wine_quality_white = wine_quality_white.select_dtypes(exclude=np.number).columns #get the non numeric columns
len(non_num_wine_quality_white) #returned 0
#wine_quality_red= pd.get_dummies(wine_quality_red[non_num_wine_quality_red]) #since there are no non numeric columns no dummies
wine_quality_white.isna().any().tolist().count(True) #count any undefined values
wine_quality_white = pd.read_csv(r'D:\Exercises\ML Lab\winequality-white.csv', sep=';')
f, ax = plt.subplots(figsize=(26, 10))
corr = wine_quality_white.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
wine_quality_white = wine_quality_white[['alcohol','pH','quality']]
#normalize the data
wine_quality_white = (wine_quality_white-wine_quality_white.min())/(wine_quality_white.max()-wine_quality_white.min())
#add bias column at index 0
wine_quality_white.insert(0,'Bias',np.ones(4898))
#create income variable
wine_quality = wine_quality_white['quality']
wine_quality_white.drop('quality',inplace=True, axis = 1)
#shuffle the dataset
index_matrices = produce_random_index(len(wine_quality_white))
#split the dataset
X_train_white = wine_quality_white.iloc[index_matrices[:2000]]
X_test_white = wine_quality_white.iloc[index_matrices[2000:3000]]
Y_train_white = wine_quality.iloc[index_matrices[:2000]].values.reshape(2000,1)
Y_test_white = wine_quality.iloc[index_matrices[2000:3000]].values.reshape(1000,1)
m3,res_plot,rmse_plot = gradient_descent(X_train_white, Y_train_white,X_test_white, Y_test_white, alpha=0.00001, N=500)
m4,res_plot,rmse_plot = gradient_descent(X_train_white, Y_train_white,X_test_white, Y_test_white, alpha=0.000001, N=500)
m5,res_plot,rmse_plot = gradient_descent(X_train_white, Y_train_white,X_test_white, Y_test_white, alpha=0.0000001, N=500)
m1,res_plot,rmse_plot = gradient_descent_armijo(X_train_white, Y_train_white,X_test_white, Y_test_white,N=10)
m2,res_plot,rmse_plot = gradient_descent_bold(X_train_white, Y_train_white,X_test_white, Y_test_white,N=10)
print(f"MSE using GD LR 0.00001 in 500 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m3)))}")
print(f"MSE using GD LR 0.000001 in 500 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m4)))}")
print(f"MSE using GD LR 0.0000001 in 500 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m5)))}")
print(f"MSE using Arimijo in 10 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m1)))}")
print(f"MSE using BOLD in 10 iterations {rmse(Y_test_white,(np.matmul(X_test_white,m2)))}")
```
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
import Data.Bits (Bits (bit, complement, popCount, shiftR, (.&.), (.|.)),
FiniteBits (finiteBitSize))
import Data.ByteArray.Hash (FnvHash32 (..), fnv1Hash)
import Data.ByteString.Char8 (pack)
import Data.Char (intToDigit)
import Data.Semigroup ((<>))
import Data.Vector (Vector, drop, singleton, take, (!), (//))
import Data.Word (Word16, Word32)
import Numeric (showIntAtBase)
import Prelude hiding (drop, lookup, take)
import System.TimeIt (timeIt)
import Text.Show.Pretty (pPrint)
newtype Binary a = Binary a deriving (Enum, Ord, Real, Integral, Eq, Num, Bits, FiniteBits)
instance (FiniteBits a, Show a, Integral a) => Show (Binary a) where
show (Binary a) = let
str = showIntAtBase 2 intToDigit a ""
size = finiteBitSize a
in replicate (size - length str) '0' <> str
type Hash = Binary Word32
type Bitmap = Binary Word16
type Shift = Int
class Hashable a where
hash :: a -> Hash
instance Hashable String where
hash s = let
FnvHash32 h = fnv1Hash (pack s)
in Binary h
data HAMT key value
= None
| Leaf Hash key value
| Many Bitmap (Vector (HAMT key value))
| Full (Vector (HAMT key value))
| Coll Hash (Vector (key, value))
deriving (Show)
empty :: HAMT key value
empty = None
bitsPerSubkey :: Int
bitsPerSubkey = 4
subkeyMask :: Bitmap
subkeyMask = (bit bitsPerSubkey) - 1
fullMask :: Bitmap
fullMask = (bit (2^bitsPerSubkey)) - 1
subkey :: Hash -> Shift -> Int
subkey hash shift = fromIntegral $ (fromIntegral $ shiftR hash shift) .&. subkeyMask
maskIndex :: Bitmap -> Bitmap -> Int
maskIndex bitmap mask = popCount (bitmap .&. (mask - 1))
bitMask :: Hash -> Shift -> Bitmap
bitMask hash shift = bit (subkey hash shift)
insertAt :: Vector a -> Int -> a -> Vector a
insertAt vector index a = take index vector <> singleton a <> drop index vector
updateAt :: Vector a -> Int -> a -> Vector a
updateAt vector index a = vector // [(index, a)]
deleteAt :: Vector a -> Int -> Vector a
deleteAt vector index = take index vector <> drop (index+1) vector
insert :: (Hashable key, Eq key) => key -> value -> HAMT key value -> HAMT key value
insert key value hamt = insert' 0 (hash key) key value hamt
insert' :: Eq key => Shift -> Hash -> key -> value -> HAMT key value -> HAMT key value
insert' shift hash key value None = Leaf hash key value
insert' shift hash key value leaf@(Leaf leafHash leafKey leafValue)
| hash == leafHash =
if key == leafKey
then Leaf hash key value
else Coll hash (insertAt (singleton (leafKey, leafValue)) 0 (key, value))
| otherwise = insert' shift hash key value (Many (bitMask leafHash shift) (singleton leaf))
insert' shift hash key value (Many bitmap vector)
| bitmap .&. mask == 0 = let
leaf = Leaf hash key value
vector' = insertAt vector index leaf
bitmap' = bitmap .|. mask
in if bitmap' == fullMask
then Full vector'
else Many bitmap' vector'
| otherwise = let
subtree = vector ! index
subtree' = insert' (shift+bitsPerSubkey) hash key value subtree
vector' = updateAt vector index subtree'
in Many bitmap vector'
where
mask = bitMask hash shift
index = maskIndex bitmap mask
insert' shift hash key value (Full vector) =
let
subtree = vector ! index
subtree' = insert' (shift+bitsPerSubkey) hash key value subtree
vector' = updateAt vector index subtree'
in Full vector'
where
index = subkey hash shift
insert' shift hash key value coll@(Coll collHash vector)
| hash == collHash = Coll collHash (updateOrPrepend 0 (length vector) key value vector)
| otherwise = insert' shift hash key value (Many (bitMask collHash shift) (singleton coll))
where
updateOrPrepend index len key value vector
| index == len = insertAt vector 0 (key, value)
| otherwise = let
(currKey, _) = vector ! index
in if currKey == key
then updateAt vector index (key, value)
else updateOrPrepend (index+1) len key value vector
fromList :: (Hashable key, Eq key) => [(key, value)] -> HAMT key value
fromList = foldr (uncurry insert) empty
lookup :: (Hashable key, Eq key) => key -> HAMT key value -> Maybe value
lookup key hamt = lookup' 0 (hash key) key hamt
lookup' :: Eq key => Shift -> Hash -> key -> HAMT key value -> Maybe value
lookup' shift hash key None = Nothing
lookup' shift hash key (Leaf leafHash leafKey leafValue)
| hash == leafHash && key == leafKey = Just leafValue
| otherwise = Nothing
lookup' shift hash key (Many bitmap vector)
| bitmap .&. mask == 0 = Nothing
| otherwise = lookup' (shift+bitsPerSubkey) hash key (vector ! index)
where
mask = bitMask hash shift
index = maskIndex bitmap mask
lookup' shift hash key (Full vector) = lookup' (shift+bitsPerSubkey) hash key (vector ! index)
where
index = subkey hash shift
lookup' shift hash key (Coll collHash vector)
| hash == collHash = findMatching 0 (length vector) key vector
| otherwise = Nothing
where
findMatching index len key vector
| index == len = Nothing
| otherwise = let
(currKey, currValue) = vector ! index
in if currKey == key
then Just currValue
else findMatching (index+1) len key vector
instance Hashable Int where
hash int = Binary (fromIntegral int)
delete :: (Hashable key, Eq key) => key -> HAMT key value -> HAMT key value
delete key hamt = delete' 0 (hash key) key hamt
delete' :: Eq key => Shift -> Hash -> key -> HAMT key value -> HAMT key value
delete' shift hash key None = None
delete' shift hash key leaf@(Leaf leafHash leafKey leafValue)
| hash == leafHash && key == leafKey = None
| otherwise = leaf
delete' shift hash key many@(Many bitmap vector)
| bitmap .&. mask == 0 = many
| otherwise = let
subtree = vector ! index
subtree' = delete' (shift+bitsPerSubkey) hash key subtree
in case subtree' of
None -> if length vector == 1
then None
else Many (bitmap .&. complement mask) (deleteAt vector index)
Leaf{} -> if length vector == 1
then subtree'
else Many bitmap (updateAt vector index subtree')
_ -> Many bitmap (updateAt vector index subtree')
where
mask = bitMask hash shift
index = maskIndex bitmap mask
delete' shift hash key (Full vector) =
let
subtree = vector ! index
subtree' = delete' (shift+bitsPerSubkey) hash key subtree
in case subtree' of
None -> Many (fullMask .&. complement mask) (deleteAt vector index)
_ -> Full (updateAt vector index subtree')
where
mask = bitMask hash shift
index = subkey hash shift
delete' shift hash key coll@(Coll collHash vector)
| hash == collHash = let
vector' = deleteMatching 0 (length vector) key vector
in if length vector' == 1
then (\(leafKey, leafValue) -> Leaf collHash leafKey leafValue) $ vector' ! 0
else Coll collHash vector'
| otherwise = coll
where
deleteMatching index len key vector
| index == len = vector
| otherwise = let
(currKey, _) = vector ! index
in if currKey == key
then deleteAt vector index
else deleteMatching (index+1) len key vector
import IHaskell.Display.Graphviz
import Control.Monad.Trans.State.Strict
import Control.Monad.Trans.Writer.CPS
import Control.Monad.Trans.Class
import qualified Data.Vector as Vector
import Data.List (intercalate, intersperse, foldl')
import qualified Prelude
data TreeNode
= TreeNode { nodeId :: Int, bitmap :: String, fields :: [Int] }
| LeafNode { leafId :: Int, leafHash :: String, leafKey :: String, leafValue :: String }
| CollNode { collId :: Int, collHash :: String, collPairs :: [(String, String)]}
deriving (Eq, Show)
getFreshId :: State Int Int
getFreshId = do
currentId <- get
put (currentId+1)
pure currentId
number :: (Show k, Show v) => HAMT k v -> WriterT [TreeNode] (State Int) Int
number None = do
tell mempty
pure 0
number (Leaf h k v) = do
i <- lift $ getFreshId
tell [(LeafNode i (show h) (show k) (show v))]
pure i
number (Many b hs) = do
i <- lift $ getFreshId
numbered <- Vector.toList <$> traverse number hs
tell [(TreeNode i (show b) numbered)]
pure i
number (Full hs) = do
i <- lift $ getFreshId
numbered <- Vector.toList <$> traverse number hs
tell [(TreeNode i "FULL" numbered)]
pure i
number (Coll h kvs) = do
i <- lift $ getFreshId
tell [CollNode i (show h) (map (\(k,v) -> (show k, show v)) (Vector.toList kvs))]
pure i
nodeData :: (Show k, Show v) => HAMT k v -> [TreeNode]
nodeData = flip evalState 0 . execWriterT . number
escape = concatMap escaper
where
escaper :: Char -> String
escaper c = case c of
'"' -> "\\\""
'\\' -> "\\\\"
_ -> [c]
nodeLines :: TreeNode -> [String]
nodeLines (LeafNode i h k v) = let
label = intercalate "|" [h, k, v]
line = ("n" ++ show i) ++ " " ++ "[label=\"" ++ escape label ++ "\"]"
in [line]
nodeLines (CollNode i h kvs) = let
label = intercalate "|" [h, shownKvs]
line = ("n" ++ show i) ++ " " ++ "[label=\"" ++ escape label ++ "\"]"
in [line]
where
shownKvs = intercalate "|" $ map (\(k,v) -> k ++ " : " ++ v) kvs
nodeLines (TreeNode i b fs) = let
indices = Prelude.take (length fs) [0..]
pairs = zip indices fs
edges = flip map pairs $ \(f,t) -> "n" ++ show i ++ ":" ++ "f" ++ show f ++ " -> " ++ "n" ++ show t
fields = flip map indices $ \ix -> "<f" ++ show ix ++ ">"
label = intercalate "|" $ b:fields
line = ("n" ++ show i) ++ " " ++ "[label=\"" ++ escape label ++ "\"]"
in (line:edges)
makeLines = concatMap nodeLines
makeDotLines :: [String] -> String
makeDotLines = concatMap (\l -> l ++ ";\n")
preamble = unlines $
[ "digraph {"
, "node [shape=record];"
, "splines=false;"
, "ranksep=2;"
, "nodesep=1;"
]
postamble = unlines $ ["}"]
makeDot :: String -> String
makeDot str = preamble ++ str ++ postamble
dotFromHAMT :: (Show k, Show v) => HAMT k v -> String
dotFromHAMT = makeDot . makeDotLines. makeLines . nodeData
dot $ dotFromHAMT (fromList (map (flip (,) ()) [0, 1, 2^32, 1 + (2^32)]) :: HAMT Int ())
```
# Factory Planning II
## Objective and Prerequisites
Are you up for a major production planning challenge? Try this example, where you will learn how to create an optimal production plan that not only maximizes profit, but also determines in which month to perform maintenance operations on your machines.
More information on this type of model can be found in example #4 of the fifth edition of Model Building in Mathematical Programming by H. P. Williams on pages 256 and 302 – 303.
This modeling example is at the intermediate level, where we assume that you know Python and are familiar with the Gurobi Python API. In addition, you should have some knowledge about building mathematical optimization models.
**Download the Repository** <br />
You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip).
**Gurobi License** <br />
In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MFG-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_factory-planning_2_COM_EVAL_GITHUB_&utm_term=factory-%20planning-problem&utm_content=C_JPM) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MFG-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_factory-planning_2_ACADEMIC_EVAL_GITHUB_&utm_term=factory-planning-problem&utm_content=C_JPM) as an *academic user*.
---
## Problem Description
A factory makes seven products (Prod 1 to Prod 7) using a range of machines including:
- Four grinders
- Two vertical drills
- Three horizontal drills
- One borer
- One planer
Each product has a defined profit contribution per unit sold (defined as the sales price per unit minus the cost of raw materials). In addition, the manufacturing of each product requires a certain amount of time on each machine (in hours). The profit contributions and manufacturing times are shown below. A dash indicates that the manufacturing process for the given product does not require that machine.
| <i></i> | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Profit | 10 | 6 | 8 | 4 | 11 | 9 | 3 |
| Grinding | 0.5 | 0.7 | - | - | 0.3 | 0.2 | 0.5 |
| Vertical Drilling | 0.1 | 0.2 | - | 0.3 | - | 0.6 | - |
| Horizontal Drilling | 0.2 | - | 0.8 | - | - | - | 0.6 |
| Boring | 0.05 | 0.03 | - | 0.07 | 0.1 | - | 0.08 |
| Planing | - | - | 0.01 | - | 0.05 | - | 0.05 |
Instead of pre-defining a maintenance schedule for the machines, as was done in Factory Planning I, in this version of the model we will also optimize the maintenance schedule.
The maintenance requirements are as follows:
- Each machine must be down for maintenance in one month of the six.
- The exception is the grinders: only two of the four grinding machines need to be down for maintenance during the six months.
There are limitations on how many of each product can be sold in a given month. These limits are shown below:
| Month | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| January | 500 | 1000 | 300 | 300 | 800 | 200 | 100 |
| February | 600 | 500 | 200 | 0 | 400 | 300 | 150 |
| March | 300 | 600 | 0 | 0 | 500 | 400 | 100 |
| April | 200 | 300 | 400 | 500 | 200 | 0 | 100 |
| May | 0 | 100 | 500 | 100 | 1000 | 300 | 0 |
| June | 500 | 500 | 100 | 300 | 1100 | 500 | 60 |
Up to 100 units of each product may be stored in inventory at a cost of $0.50 per unit per month. At the start of January, there is no product inventory. However, by the end of June, there should be 50 units of each product in inventory.
The factory produces products six days a week using two eight-hour shifts per day. It may be assumed that each month consists of 24 working days. Also, for the purposes of this model, there are no production sequencing issues that need to be taken into account.
What should the production and maintenance plans look like? Also, is it possible to recommend any price increases and determine the value of acquiring any new machines?
This problem is based on a larger model built for the Cornish engineering company of Holman Brothers.
---
## Model Formulation
### Sets and Indices
$t \in \text{Months}=\{\text{Jan},\text{Feb},\text{Mar},\text{Apr},\text{May},\text{Jun}\}$: Set of months.
$p \in \text{Products}=\{1,2,\dots,7\}$: Set of products.
$m \in \text{Machines}=\{\text{Grinder},\text{VertDrill},\text{HoriDrill},\text{Borer},\text{Planer}\}$: Set of machines.
### Parameters
$\text{hours_per_month} \in \mathbb{R}^+$: Time (in hours/month) available at any machine on a monthly basis. It results from multiplying the number of working days (24 days) by the number of shifts per day (2) by the duration of a shift (8 hours), i.e. $2 \times 8 \times 24 = 384$ hours.
$\text{max_inventory} \in \mathbb{N}$: Maximum number of units of a single product type that can be stored in inventory at any given month.
$\text{holding_cost} \in \mathbb{R}^+$: Monthly cost (in USD/unit/month) of keeping in inventory a unit of any product type.
$\text{store_target} \in \mathbb{N}$: Number of units of each product type to keep in inventory at the end of the planning horizon.
$\text{profit}_p \in \mathbb{R}^+$: Profit (in USD/unit) of product $p$.
$\text{installed}_m \in \mathbb{N}$: Number of machines of type $m$ installed in the factory.
$\text{down_req}_{m} \in \mathbb{N}$: Number of machines of type $m$ that should be scheduled for maintenance at some point in the planning horizon.
$\text{time_req}_{m,p} \in \mathbb{R}^+$: Time (in hours/unit) needed on machine $m$ to manufacture one unit of product $p$.
$\text{max_sales}_{t,p} \in \mathbb{N}$: Maximum number of units of product $p$ that can be sold at month $t$.
### Decision Variables
$\text{make}_{t,p} \in \mathbb{R}^+$: Number of units of product $p$ to manufacture at month $t$.
$\text{store}_{t,p} \in [0, \text{max_inventory}] \subset \mathbb{R}^+$: Number of units of product $p$ to store at month $t$.
$\text{sell}_{t,p} \in [0, \text{max_sales}_{t,p}] \subset \mathbb{R}^+$: Number of units of product $p$ to sell at month $t$.
$\text{repair}_{t,m} \in \{0,1,\dots, \text{down_req}_m\} \subset \mathbb{N}$: Number of machines of type $m$ scheduled for maintenance at month $t$.
**Assumption:** We can produce fractional units.
### Objective Function
- **Profit:** Maximize the total profit (in USD) of the planning horizon.
\begin{equation}
\text{Maximize} \quad Z = \sum_{t \in \text{Months}}\sum_{p \in \text{Products}}
(\text{profit}_p*\text{sell}_{t,p} - \text{holding_cost}*\text{store}_{t,p})
\tag{0}
\end{equation}
### Constraints
- **Initial Balance:** For each product $p$, the number of units produced in January should be equal to the number of units sold plus the number stored in that month (in units of product).
\begin{equation}
\text{make}_{\text{Jan},p} = \text{sell}_{\text{Jan},p} + \text{store}_{\text{Jan},p} \quad \forall p \in \text{Products}
\tag{1}
\end{equation}
- **Balance:** For each product $p$, the number of units produced in month $t$ and previously stored should be equal to the number of units sold and stored in that month (in units of product).
\begin{equation}
\text{store}_{t-1,p} + \text{make}_{t,p} = \text{sell}_{t,p} + \text{store}_{t,p} \quad \forall (t,p) \in (\text{Months} \setminus \{\text{Jan}\}) \times \text{Products}
\tag{2}
\end{equation}
- **Inventory Target:** The number of units of product $p$ kept in inventory at the end of the planning horizon should hit the target (in units of product).
\begin{equation}
\text{store}_{\text{Jun},p} = \text{store_target} \quad \forall p \in \text{Products}
\tag{3}
\end{equation}
- **Machine Capacity:** Total time used to manufacture any product at machine type $m$ cannot exceed its monthly capacity (in hours).
\begin{equation}
\sum_{p \in \text{Products}}\text{time_req}_{m,p}*\text{make}_{t,p} \leq \text{hours_per_month}*(\text{installed}_m - \text{repair}_{t,m}) \quad \forall (t,m) \in \text{Months} \times \text{Machines}
\tag{4}
\end{equation}
- **Maintenance**: The number of machines of type $m$ scheduled for maintenance should meet the requirement.
\begin{equation}
\sum_{t \in \text{Months}}\text{repair}_{t,m} = \text{down_req}_m \quad \forall m \in \text{Machines}
\tag{5}
\end{equation}
---
## Python Implementation
We import the Gurobi Python Module and other Python libraries.
```
import numpy as np
import pandas as pd
import gurobipy as gp
from gurobipy import GRB
# tested with Python 3.7.0 & Gurobi 9.0
```
## Input Data
We define all the input data of the model.
```
# Parameters
products = ["Prod1", "Prod2", "Prod3", "Prod4", "Prod5", "Prod6", "Prod7"]
machines = ["grinder", "vertDrill", "horiDrill", "borer", "planer"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun"]
profit = {"Prod1":10, "Prod2":6, "Prod3":8, "Prod4":4, "Prod5":11, "Prod6":9, "Prod7":3}
time_req = {
"grinder": { "Prod1": 0.5, "Prod2": 0.7, "Prod5": 0.3,
"Prod6": 0.2, "Prod7": 0.5 },
"vertDrill": { "Prod1": 0.1, "Prod2": 0.2, "Prod4": 0.3,
"Prod6": 0.6 },
"horiDrill": { "Prod1": 0.2, "Prod3": 0.8, "Prod7": 0.6 },
"borer": { "Prod1": 0.05,"Prod2": 0.03,"Prod4": 0.07,
"Prod5": 0.1, "Prod7": 0.08 },
"planer": { "Prod3": 0.01,"Prod5": 0.05,"Prod7": 0.05 }
}
# number of each machine available
installed = {"grinder":4, "vertDrill":2, "horiDrill":3, "borer":1, "planer":1}
# number of machines that need to be under maintenance
down_req = {"grinder":2, "vertDrill":2, "horiDrill":3, "borer":1, "planer":1}
# market limitation on sales
max_sales = {
("Jan", "Prod1") : 500,
("Jan", "Prod2") : 1000,
("Jan", "Prod3") : 300,
("Jan", "Prod4") : 300,
("Jan", "Prod5") : 800,
("Jan", "Prod6") : 200,
("Jan", "Prod7") : 100,
("Feb", "Prod1") : 600,
("Feb", "Prod2") : 500,
("Feb", "Prod3") : 200,
("Feb", "Prod4") : 0,
("Feb", "Prod5") : 400,
("Feb", "Prod6") : 300,
("Feb", "Prod7") : 150,
("Mar", "Prod1") : 300,
("Mar", "Prod2") : 600,
("Mar", "Prod3") : 0,
("Mar", "Prod4") : 0,
("Mar", "Prod5") : 500,
("Mar", "Prod6") : 400,
("Mar", "Prod7") : 100,
("Apr", "Prod1") : 200,
("Apr", "Prod2") : 300,
("Apr", "Prod3") : 400,
("Apr", "Prod4") : 500,
("Apr", "Prod5") : 200,
("Apr", "Prod6") : 0,
("Apr", "Prod7") : 100,
("May", "Prod1") : 0,
("May", "Prod2") : 100,
("May", "Prod3") : 500,
("May", "Prod4") : 100,
("May", "Prod5") : 1000,
("May", "Prod6") : 300,
("May", "Prod7") : 0,
("Jun", "Prod1") : 500,
("Jun", "Prod2") : 500,
("Jun", "Prod3") : 100,
("Jun", "Prod4") : 300,
("Jun", "Prod5") : 1100,
("Jun", "Prod6") : 500,
("Jun", "Prod7") : 60,
}
holding_cost = 0.5
max_inventory = 100
store_target = 50
hours_per_month = 2*8*24
```
## Model Deployment
We create a model and the variables. For each product (seven kinds of products) and each time period (month), we create variables for the amount of that product that gets manufactured, stored, and sold. In each month there is an upper limit on the amount of each product that can be sold, due to market limitations. For each type of machine and each month we create an integer variable, repair, which tells us how many machines of that type are down for maintenance in that month.
```
factory = gp.Model('Factory Planning II')
make = factory.addVars(months, products, name="Make") # quantity manufactured
store = factory.addVars(months, products, ub=max_inventory, name="Store") # quantity stored
sell = factory.addVars(months, products, ub=max_sales, name="Sell") # quantity sold
repair = factory.addVars(months, machines, vtype=GRB.INTEGER, ub=down_req, name="Repair") # number of machines down
```
Next, we insert the constraints.
The balance constraints ensure that, for each product and each month, the amount in storage at the end of the previous month plus the amount manufactured in the current month equals the amount sold plus the amount kept in storage in the current month. For January there is no prior storage, so the initial balance constraints simply equate production to sales plus storage.
```
#1. Initial Balance
Balance0 = factory.addConstrs((make[months[0], product] == sell[months[0], product]
+ store[months[0], product] for product in products), name="Initial_Balance")
#2. Balance
Balance = factory.addConstrs((store[months[months.index(month) -1], product] +
make[month, product] == sell[month, product] + store[month, product]
for product in products for month in months
if month != months[0]), name="Balance")
```
The inventory target constraints ensure that at the end of the last month the inventory contains the specified number of units of each product.
```
#3. Inventory Target
TargetInv = factory.addConstrs((store[months[-1], product] == store_target for product in products), name="End_Balance")
```
The capacity constraints ensure that, for each month, the time all products require on a given machine type is less than or equal to the available hours for that machine type in that month, i.e. the hours per month multiplied by the number of machines of that type that are not down for maintenance. Each product requires some machine hours on different machines. Each machine is down in one or more months due to maintenance, so the number of available machines varies per month. There can be multiple machines per machine type.
```
#4. Machine Capacity
MachineCap = factory.addConstrs((gp.quicksum(time_req[machine][product] * make[month, product]
for product in time_req[machine])
<= hours_per_month * (installed[machine] - repair[month, machine])
for machine in machines for month in months),
name = "Capacity")
```
The maintenance constraints ensure that the specified number of machines of each type is down for maintenance in some month. Which month a machine is down is now part of the optimization.
```
#5. Maintenance
Maintenance = factory.addConstrs((repair.sum('*', machine) == down_req[machine] for machine in machines), "Maintenance")
```
The objective is to maximize the profit of the company, which consists of the profit from each product sold minus the cost of storing the unsold products. This can be stated as:
```
#0. Objective Function
obj = gp.quicksum(profit[product] * sell[month, product] - holding_cost * store[month, product]
for month in months for product in products)
factory.setObjective(obj, GRB.MAXIMIZE)
```
Next, we start the optimization and Gurobi finds the optimal solution.
```
factory.optimize()
```
---
## Analysis
The result of the optimization model shows that the maximum profit we can achieve is $\$108,855.00$. This is an increase of $\$15,139.82$ over the course of six months compared to the Factory Planning I example as a result of being able to pick the maintenance schedule as opposed to having a fixed one. Let's see the solution that achieves that optimal result.
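As a quick sanity check (not part of the original write-up), the reported figure can also be read directly from the solved model:
```
# Objective value of the solved model, i.e. the total profit over the horizon.
print(f"Total profit: ${factory.ObjVal:,.2f}")
```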
### Production Plan
This plan determines the amount of each product to make at each period of the planning horizon. For example, in February we make 600 units of product Prod1.
```
rows = months.copy()
columns = products.copy()
make_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, product in make.keys():
if (abs(make[month, product].x) > 1e-6):
make_plan.loc[month, product] = np.round(make[month, product].x, 1)
make_plan
```
### Sales Plan
This plan defines the amount of each product to sell at each period of the planning horizon. For example, in February we sell 600 units of product Prod1.
```
rows = months.copy()
columns = products.copy()
sell_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, product in sell.keys():
if (abs(sell[month, product].x) > 1e-6):
sell_plan.loc[month, product] = np.round(sell[month, product].x, 1)
sell_plan
```
### Inventory Plan
This plan reflects the amount of product in inventory at the end of each period of the planning horizon. For example, at the end of February we have zero units of Prod1 in inventory.
```
rows = months.copy()
columns = products.copy()
store_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, product in store.keys():
if (abs(store[month, product].x) > 1e-6):
store_plan.loc[month, product] = np.round(store[month, product].x, 1)
store_plan
```
### Maintenance Plan
This plan shows the maintenance plan for each period of the planning horizon. For example, 2 machines of type grinder will be down for maintenance in April.
```
rows = months.copy()
columns = machines.copy()
repair_plan = pd.DataFrame(columns=columns, index=rows, data=0.0)
for month, machine in repair.keys():
if (abs(repair[month, machine].x) > 1e-6):
repair_plan.loc[month, machine] = repair[month, machine].x
repair_plan
```
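The problem statement also asks whether we can recommend price increases or estimate the value of acquiring new machines. One hedged way to approach this, sketched below rather than taken from the original notebook, is to fix the integer maintenance decisions at their optimal values (the usual Gurobi recipe for retrieving dual information from a MIP), re-optimize, and read the shadow prices (Pi) of the capacity constraints; this sketch assumes the fixed model preserves the constraint order and exposes Pi after optimization.
```
# Sketch: marginal value (USD per machine-hour) of extra capacity in each month.
fixed = factory.fixed()   # copy of the model with the integer repair variables pinned
fixed.optimize()
capacity_value = pd.DataFrame(index=months, columns=machines, data=0.0)
for orig, fc in zip(factory.getConstrs(), fixed.getConstrs()):
    name = orig.ConstrName           # e.g. "Capacity[grinder,Jan]"
    if name.startswith("Capacity["):
        machine, month = name[len("Capacity["):-1].split(",")
        capacity_value.loc[month, machine] = fc.Pi
capacity_value
```
A non-zero dual multiplied by hours_per_month gives a rough indication of what one extra machine of that type might have been worth in that month, which speaks to the question about acquiring new machines; the reduced costs of the sell variables could be inspected in the same spirit for the question about price increases.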
**Note:** If you want to write your solution to a file, rather than print it to the terminal, you can use the model.write() command. An example implementation is:
`factory.write("factory-planning-2-output.sol")`
---
## References
H. Paul Williams, Model Building in Mathematical Programming, fifth edition.
Copyright © 2020 Gurobi Optimization, LLC
## RETAIL BANKING
Our client is a retail banking institution. Term deposits are a major source
of income for a bank.
A term deposit is a cash investment held at a financial institution. The
money is invested for an agreed rate of interest over a fixed amount of
time, or term.
The bank has various outreach plans to sell term deposits to their
customers such as email marketing, advertisements, telephonic marketing
and digital marketing.
Telephonic marketing campaigns still remain one of the most effective ways to reach out to people. However, they require a huge investment, as large call centers are hired to actually execute these campaigns. Hence, it is crucial to identify beforehand the customers most likely to convert, so that they can be specifically targeted via call.
I am provided with client data such as the age of the client, their job type, their marital status, etc. Along with the client data, I am also provided with information about the call, such as the duration of the call and the day and month of the call. Given this information, my task is to predict whether the client will subscribe to a term deposit.
(Dataset obtained from kaggle.com.)
```
# importing libraries
import pandas as pd
import numpy as np # For mathematical calculations
import seaborn as sns # For data visualization
import matplotlib.pyplot as plt
import seaborn as sn # For plotting graphs
%matplotlib inline
import warnings # To ignore any warnings
warnings.filterwarnings("ignore")
# loading the data
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.columns
test.columns
train.shape, test.shape
```
We have 17 independent variables and 1 target variable, i.e. subscribed, in the train dataset. The test dataset has the same features as the train dataset except for subscribed, which we will predict with the help of a model built using the train data.
```
# Print data types for each variable
train.dtypes
```
We can see there are two format of data types:
1. **object**: Object format means variables are categorical.
2. **int64**: It represents the integer variables.
```
#printing first five rows of the dataset
train.head()
```
## Univariate Analysis
Now let's look at the distribution of our target variable, i.e. subscribed. As it is a categorical variable, let us look at its frequency table, percentage distribution and bar plot.
```
train['subscribed'].value_counts()
# Normalize can be set to True to print proportions instead of number
train['subscribed'].value_counts(normalize=True)
# plotting the bar plot of frequencies
train['subscribed'].value_counts().plot.bar()
```
So, 3,715 users out of a total of 31,647 have subscribed, which is around 12%. Let's now explore the variables to get a better understanding of the dataset. We will first explore the variables individually using univariate analysis, then we will look at the relation between various independent variables and the target variable. We will also look at the correlation plot to see which variables affect the target variable most.
Let's first look at the distribution of age variable to see how many people belongs to a particular age group.
```
sn.distplot(train["age"])
```
We can infer that most of the clients fall in the age group between 20 and 60. Now let's look at the different types of jobs of the clients. As job is a categorical variable, we will look at its frequency table.
```
train['job'].value_counts().plot.bar()
```
We see that most of the clients have blue-collar jobs, while students are the least numerous, as students generally do not take a term deposit. Let's also look at how many clients have a default history.
```
train['default'].value_counts().plot.bar()
```
More than 90% of the clients have no default history. Now we will explore these variables against the target variable using bivariate analysis. We will make use of scatter plots for continuous or numeric variables and crosstabs for the categorical variables. Let's start with job and subscribed variable.
## Bivariate Analysis
```
print(pd.crosstab(train['job'],train['subscribed']))
job=pd.crosstab(train['job'],train['subscribed'])
job.div(job.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(8,8))
plt.xlabel('Job')
plt.ylabel('Percentage')
```
From the above graph we can infer that students and retired people have higher chances of subscribing to a term deposit, which is surprising, as students generally do not subscribe to a term deposit. The likely reason is that there are relatively few students in the dataset, so proportionally more of them have subscribed compared to other job types.
Next, let's explore the default variable against the subscribed variable.
```
print(pd.crosstab(train['default'],train['subscribed']))
default=pd.crosstab(train['default'],train['subscribed'])
default.div(default.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(8,8))
plt.xlabel('default')
plt.ylabel('Percentage')
```
We can infer that clients having no previous default have slightly higher chances of subscribing to a term deposit as compared to the clients who have a previous default history.
Let's now look at how correlated our numerical variables are. Variables with high positive or negative correlation values are strongly related, which gives us an overview of the variables that might affect our target variable. We will convert our target variable into numeric values first.
```
train['subscribed'].replace('no', 0,inplace=True)
train['subscribed'].replace('yes', 1,inplace=True)
corr = train.corr()
mask = np.array(corr)
mask[np.tril_indices_from(mask)] = False
fig,ax= plt.subplots()
fig.set_size_inches(20,10)
sn.heatmap(corr, mask=mask,vmax=.9, square=True,annot=True, cmap="YlGnBu")
```
We can infer that the duration of the call is highly correlated with the target variable. This makes intuitive sense: the longer the call, the more likely the client is showing interest in the term deposit and hence the more likely they are to subscribe.
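To visualize this relationship (a quick sketch, assuming the call-duration column is named duration as in the original data), we can compare the distribution of call durations for the two classes:
```
# Compare call durations for non-subscribers (0) and subscribers (1).
sn.boxplot(x='subscribed', y='duration', data=train)
plt.show()
```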
Next we will look for any missing values in the dataset.
```
train.isnull().sum()
```
There are no missing values in the train dataset.
Next, we will start to build our predictive model to predict whether a client will subscribe to a term deposit or not.
As the sklearn models take only numerical input, we will convert the categorical variables into numerical values using dummies (the ID column, being a unique identifier, carries no predictive signal and could be dropped as well). We will also remove the target variable and keep it in a separate variable.
## Model Building
```
target = train['subscribed']
train = train.drop('subscribed', axis=1)
# applying dummies on the train dataset
train = pd.get_dummies(train)
```
Now, it's time to build our model. We will split the train data into a training set and a validation set so that we can validate the results of our model on the validation set. We will keep 20% of the data as the validation set and the rest as the training set.
```
from sklearn.model_selection import train_test_split
# splitting into train and validation with 20% data in validation set and 80% data in train set.
X_train, X_val, y_train, y_val = train_test_split(train, target, test_size = 0.2, random_state=12)
```
Now our data is ready. It's time to build our model and check its performance. Since this is a classification problem, let's first build a logistic regression model.
## Logistic Regression
```
from sklearn.linear_model import LogisticRegression
# defining the logistic regression model
lreg = LogisticRegression()
# fitting the model on X_train and y_train
lreg.fit(X_train,y_train)
# making prediction on the validation set
prediction = lreg.predict(X_val)
```
Now I will evaluate how accurate our predictions are. As the evaluation metric for this problem is accuracy, let's calculate the accuracy on the validation set.
```
from sklearn.metrics import accuracy_score
# calculating the accuracy score
accuracy_score(y_val, prediction)
```
We can see that I got an accuracy score of around 90% on the validation dataset. Note that logistic regression has a linear decision boundary.
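Since only about 12% of the clients subscribed, accuracy alone can be flattering on such an imbalanced dataset; as a quick check (a sketch, not part of the original evaluation), we can also look at the confusion matrix and per-class metrics on the validation set:
```
from sklearn.metrics import confusion_matrix, classification_report
# Rows are the actual classes (0 = no, 1 = yes), columns the predicted classes.
print(confusion_matrix(y_val, prediction))
print(classification_report(y_val, prediction))
```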
I will now try a decision tree algorithm to check whether it gives better accuracy than the previous result.
## Decision Tree
```
from sklearn.tree import DecisionTreeClassifier
# defining the decision tree model with depth of 4, you can tune it further to improve the accuracy score
clf = DecisionTreeClassifier(max_depth=4, random_state=0)
# fitting the decision tree model
clf.fit(X_train,y_train)
# making prediction on the validation set
predict = clf.predict(X_val)
# calculating the accuracy score
accuracy_score(y_val, predict)
```
I got an accuracy of more than 90% on the validation set. The score could likely be improved further by tuning the hyperparameters of the model; a sketch of one way to do this is shown below. We will then make predictions on the test dataset, applying the same changes to the test set as we did to the training set before making the predictions.
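A minimal tuning sketch (with a hypothetical parameter grid, using the same training split as above) might look like the following; the best estimator found this way could then replace the fixed-depth tree.
```
from sklearn.model_selection import GridSearchCV
# Hypothetical grid over tree depth and minimum leaf size.
param_grid = {'max_depth': [3, 4, 5, 6, 8], 'min_samples_leaf': [1, 25, 50]}
grid = GridSearchCV(DecisionTreeClassifier(random_state=0), param_grid, cv=5, scoring='accuracy')
grid.fit(X_train, y_train)
print(grid.best_params_, grid.best_score_)
```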
```
test = pd.get_dummies(test)
test_prediction = clf.predict(test)
```
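One caveat worth noting: pd.get_dummies applied to the test set can yield a slightly different set of columns than on the training data if any category is missing from one of them. A defensive sketch (not in the original workflow) aligns the test columns with the training columns before predicting:
```
# Align the dummy-encoded test set with the training columns;
# any category absent from the test data becomes an all-zero column.
test = test.reindex(columns=train.columns, fill_value=0)
test_prediction = clf.predict(test)
```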
Finally, I will save these predictions into a CSV file. I can then open this CSV file and copy the predictions into the provided Excel file to generate the score.
```
submission = pd.DataFrame()
# creating the ID and subscribed columns and saving the predictions in the subscribed column
submission['ID'] = test['ID']
submission['subscribed'] = test_prediction
```
```
submission['subscribed'].replace(0,'no',inplace=True)
submission['subscribed'].replace(1,'yes',inplace=True)
submission.to_csv('submission.csv', header=True, index=False)
```
Now I have generated the submission file. To generate the score (final output):
1. Open the submission.csv file.
2. Copy the values in the subscribed column and paste them in the subscribed column of the solution_checker.xlsx file.
3. We can see the accuracy of the model on the test dataset under the accuracy score column.
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example using the NetworkX ego_graph() function to return the main egonet of
the largest hub in a Barabási-Albert network.
"""
__author__="""Drew Conway (drew.conway@nyu.edu)"""
from operator import itemgetter
import networkx as nx
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Create a BA model graph
n=100
m=2
G=nx.generators.barabasi_albert_graph(n,m)
# find node with largest degree
node_and_degree=G.degree()
(largest_hub,degree)=sorted(node_and_degree.items(),key=itemgetter(1))[-1]
# Create ego graph of main hub
hub_ego=nx.ego_graph(G,largest_hub)
# Draw graph
pos=nx.spring_layout(hub_ego)
nx.draw(hub_ego,pos,node_color='b',node_size=50,with_labels=False)
# Draw ego as large and red
nx.draw_networkx_nodes(hub_ego,pos,nodelist=[largest_hub],node_size=300,node_color='r')
plt.savefig('ego_graph.eps', format='eps')
plt.show()
g = nx.Graph()
g.add_edge(1, 2)
g.add_edge(1, 3)
g.add_edge(3, 4)
g.add_edge(1, 4)
gg = nx.Graph()
gg.add_edge(2, 3)
gg.add_edge(2, 5)
gg.add_edge(2, 4)
gg.add_edge(5, 4)
nx.draw(g, with_labels=True, node_color=['red', 'lightgreen', 'lightgreen', 'lightgreen'], alpha=1.0, node_size=400,
font_weight='normal', font_size=18, font_color='black', font_type='bold')
plt.savefig('1.eps', format='eps')
plt.show()
nx.draw(gg, with_labels=True, node_color=['red', 'lightgreen', 'lightgreen', 'lightgreen'],
node_size=400,
font_color='black',
font_type='bold',
font_size=18)
plt.savefig('2.eps', format='eps')
plt.show()
nx.draw(nx.compose(g, gg), with_labels=True, node_color=['red', 'red', 'lightgreen', 'lightgreen', 'lightgreen'],
font_color='black',
font_type='bold',
font_size=18, node_size=400)
plt.savefig('union.eps', format='eps')
plt.show()
gx = g.copy()
gx.add_edge(2, 3)
gx.add_edge(2, 5)
gx.add_edge(2, 4)
nx.draw(gx, with_labels=True, node_color=['red', 'lightgreen', 'lightgreen', 'lightgreen', 'lightgreen'],
font_size=18,
        font_weight='bold',
font_color='black',
node_size=400)
plt.savefig('neighbors1.eps', format='eps')
plt.show()
gy = gg.copy()
gy.add_edge(1, 2)
gy.add_edge(1, 3)
gy.add_edge(1, 4)
nx.draw(gx, with_labels=True, node_color=['lightgreen', 'red', 'lightgreen', 'lightgreen', 'lightgreen'],
font_size=18,
font_color='black',
        font_weight='bold',
node_size=400)
plt.savefig('neighbors2.eps', format='eps')
plt.show()
import pylab, numpy
x = numpy.arange(10)
# create a figure for the data
figData = pylab.figure()
ax = pylab.gca()
colors = ['#CC0D19', '#FC6907', '#22941A', '#1661A7']
pylab.plot(x, x, label='Random', color=colors[0])
pylab.plot(x, x, label='Bridges', color=colors[1])
pylab.plot(x, x, label='Friends', color=colors[2])
pylab.plot(x, x, label='Acquaintance', color=colors[3])
# plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# ncol=2, mode="expand", borderaxespad=0.)
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
# fancybox=False, shadow=False, ncol=5)
# create a second figure for the legend
figLegend = pylab.figure(figsize = (2, 2))
# produce a legend for the objects in the other figure
pylab.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
figLegend.savefig('a.eps', format='eps')
figData.savefig('b.eps', format='eps')
# plt.show()
plt.clf()
```
# Example: CanvasXpress layout Chart No. 1
This example page demonstrates how to use the Python package to create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/layout-1.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
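For reference, a minimal sketch of how that generator function could be invoked is shown below. The file name `layout_1.json`, and the assumption that the function takes a path to the reproducible JSON and returns the generated Python source as a string, are illustrative assumptions and not details taken from the example page.
```
from canvasxpress.util.generator import generate_canvasxpress_code_from_json_file

# Hypothetical usage: 'layout_1.json' is assumed to be the reproducible JSON saved
# from the example page, and the return value is assumed to be generated Python code.
generated_code = generate_canvasxpress_code_from_json_file("layout_1.json")
print(generated_code)
```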
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="layout1",
data={
"z": {
"Species": [
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica"
]
},
"y": {
"vars": [
"s1",
"s2",
"s3",
"s4",
"s5",
"s6",
"s7",
"s8",
"s9",
"s10",
"s11",
"s12",
"s13",
"s14",
"s15",
"s16",
"s17",
"s18",
"s19",
"s20",
"s21",
"s22",
"s23",
"s24",
"s25",
"s26",
"s27",
"s28",
"s29",
"s30",
"s31",
"s32",
"s33",
"s34",
"s35",
"s36",
"s37",
"s38",
"s39",
"s40",
"s41",
"s42",
"s43",
"s44",
"s45",
"s46",
"s47",
"s48",
"s49",
"s50",
"s51",
"s52",
"s53",
"s54",
"s55",
"s56",
"s57",
"s58",
"s59",
"s60",
"s61",
"s62",
"s63",
"s64",
"s65",
"s66",
"s67",
"s68",
"s69",
"s70",
"s71",
"s72",
"s73",
"s74",
"s75",
"s76",
"s77",
"s78",
"s79",
"s80",
"s81",
"s82",
"s83",
"s84",
"s85",
"s86",
"s87",
"s88",
"s89",
"s90",
"s91",
"s92",
"s93",
"s94",
"s95",
"s96",
"s97",
"s98",
"s99",
"s100",
"s101",
"s102",
"s103",
"s104",
"s105",
"s106",
"s107",
"s108",
"s109",
"s110",
"s111",
"s112",
"s113",
"s114",
"s115",
"s116",
"s117",
"s118",
"s119",
"s120",
"s121",
"s122",
"s123",
"s124",
"s125",
"s126",
"s127",
"s128",
"s129",
"s130",
"s131",
"s132",
"s133",
"s134",
"s135",
"s136",
"s137",
"s138",
"s139",
"s140",
"s141",
"s142",
"s143",
"s144",
"s145",
"s146",
"s147",
"s148",
"s149",
"s150"
],
"smps": [
"Sepal.Length",
"Sepal.Width",
"Petal.Length",
"Petal.Width"
],
"data": [
[
5.1,
3.5,
1.4,
0.2
],
[
4.9,
3,
1.4,
0.2
],
[
4.7,
3.2,
1.3,
0.2
],
[
4.6,
3.1,
1.5,
0.2
],
[
5,
3.6,
1.4,
0.2
],
[
5.4,
3.9,
1.7,
0.4
],
[
4.6,
3.4,
1.4,
0.3
],
[
5,
3.4,
1.5,
0.2
],
[
4.4,
2.9,
1.4,
0.2
],
[
4.9,
3.1,
1.5,
0.1
],
[
5.4,
3.7,
1.5,
0.2
],
[
4.8,
3.4,
1.6,
0.2
],
[
4.8,
3,
1.4,
0.1
],
[
4.3,
3,
1.1,
0.1
],
[
5.8,
4,
1.2,
0.2
],
[
5.7,
4.4,
1.5,
0.4
],
[
5.4,
3.9,
1.3,
0.4
],
[
5.1,
3.5,
1.4,
0.3
],
[
5.7,
3.8,
1.7,
0.3
],
[
5.1,
3.8,
1.5,
0.3
],
[
5.4,
3.4,
1.7,
0.2
],
[
5.1,
3.7,
1.5,
0.4
],
[
4.6,
3.6,
1,
0.2
],
[
5.1,
3.3,
1.7,
0.5
],
[
4.8,
3.4,
1.9,
0.2
],
[
5,
3,
1.6,
0.2
],
[
5,
3.4,
1.6,
0.4
],
[
5.2,
3.5,
1.5,
0.2
],
[
5.2,
3.4,
1.4,
0.2
],
[
4.7,
3.2,
1.6,
0.2
],
[
4.8,
3.1,
1.6,
0.2
],
[
5.4,
3.4,
1.5,
0.4
],
[
5.2,
4.1,
1.5,
0.1
],
[
5.5,
4.2,
1.4,
0.2
],
[
4.9,
3.1,
1.5,
0.2
],
[
5,
3.2,
1.2,
0.2
],
[
5.5,
3.5,
1.3,
0.2
],
[
4.9,
3.6,
1.4,
0.1
],
[
4.4,
3,
1.3,
0.2
],
[
5.1,
3.4,
1.5,
0.2
],
[
5,
3.5,
1.3,
0.3
],
[
4.5,
2.3,
1.3,
0.3
],
[
4.4,
3.2,
1.3,
0.2
],
[
5,
3.5,
1.6,
0.6
],
[
5.1,
3.8,
1.9,
0.4
],
[
4.8,
3,
1.4,
0.3
],
[
5.1,
3.8,
1.6,
0.2
],
[
4.6,
3.2,
1.4,
0.2
],
[
5.3,
3.7,
1.5,
0.2
],
[
5,
3.3,
1.4,
0.2
],
[
7,
3.2,
4.7,
1.4
],
[
6.4,
3.2,
4.5,
1.5
],
[
6.9,
3.1,
4.9,
1.5
],
[
5.5,
2.3,
4,
1.3
],
[
6.5,
2.8,
4.6,
1.5
],
[
5.7,
2.8,
4.5,
1.3
],
[
6.3,
3.3,
4.7,
1.6
],
[
4.9,
2.4,
3.3,
1
],
[
6.6,
2.9,
4.6,
1.3
],
[
5.2,
2.7,
3.9,
1.4
],
[
5,
2,
3.5,
1
],
[
5.9,
3,
4.2,
1.5
],
[
6,
2.2,
4,
1
],
[
6.1,
2.9,
4.7,
1.4
],
[
5.6,
2.9,
3.6,
1.3
],
[
6.7,
3.1,
4.4,
1.4
],
[
5.6,
3,
4.5,
1.5
],
[
5.8,
2.7,
4.1,
1
],
[
6.2,
2.2,
4.5,
1.5
],
[
5.6,
2.5,
3.9,
1.1
],
[
5.9,
3.2,
4.8,
1.8
],
[
6.1,
2.8,
4,
1.3
],
[
6.3,
2.5,
4.9,
1.5
],
[
6.1,
2.8,
4.7,
1.2
],
[
6.4,
2.9,
4.3,
1.3
],
[
6.6,
3,
4.4,
1.4
],
[
6.8,
2.8,
4.8,
1.4
],
[
6.7,
3,
5,
1.7
],
[
6,
2.9,
4.5,
1.5
],
[
5.7,
2.6,
3.5,
1
],
[
5.5,
2.4,
3.8,
1.1
],
[
5.5,
2.4,
3.7,
1
],
[
5.8,
2.7,
3.9,
1.2
],
[
6,
2.7,
5.1,
1.6
],
[
5.4,
3,
4.5,
1.5
],
[
6,
3.4,
4.5,
1.6
],
[
6.7,
3.1,
4.7,
1.5
],
[
6.3,
2.3,
4.4,
1.3
],
[
5.6,
3,
4.1,
1.3
],
[
5.5,
2.5,
4,
1.3
],
[
5.5,
2.6,
4.4,
1.2
],
[
6.1,
3,
4.6,
1.4
],
[
5.8,
2.6,
4,
1.2
],
[
5,
2.3,
3.3,
1
],
[
5.6,
2.7,
4.2,
1.3
],
[
5.7,
3,
4.2,
1.2
],
[
5.7,
2.9,
4.2,
1.3
],
[
6.2,
2.9,
4.3,
1.3
],
[
5.1,
2.5,
3,
1.1
],
[
5.7,
2.8,
4.1,
1.3
],
[
6.3,
3.3,
6,
2.5
],
[
5.8,
2.7,
5.1,
1.9
],
[
7.1,
3,
5.9,
2.1
],
[
6.3,
2.9,
5.6,
1.8
],
[
6.5,
3,
5.8,
2.2
],
[
7.6,
3,
6.6,
2.1
],
[
4.9,
2.5,
4.5,
1.7
],
[
7.3,
2.9,
6.3,
1.8
],
[
6.7,
2.5,
5.8,
1.8
],
[
7.2,
3.6,
6.1,
2.5
],
[
6.5,
3.2,
5.1,
2
],
[
6.4,
2.7,
5.3,
1.9
],
[
6.8,
3,
5.5,
2.1
],
[
5.7,
2.5,
5,
2
],
[
5.8,
2.8,
5.1,
2.4
],
[
6.4,
3.2,
5.3,
2.3
],
[
6.5,
3,
5.5,
1.8
],
[
7.7,
3.8,
6.7,
2.2
],
[
7.7,
2.6,
6.9,
2.3
],
[
6,
2.2,
5,
1.5
],
[
6.9,
3.2,
5.7,
2.3
],
[
5.6,
2.8,
4.9,
2
],
[
7.7,
2.8,
6.7,
2
],
[
6.3,
2.7,
4.9,
1.8
],
[
6.7,
3.3,
5.7,
2.1
],
[
7.2,
3.2,
6,
1.8
],
[
6.2,
2.8,
4.8,
1.8
],
[
6.1,
3,
4.9,
1.8
],
[
6.4,
2.8,
5.6,
2.1
],
[
7.2,
3,
5.8,
1.6
],
[
7.4,
2.8,
6.1,
1.9
],
[
7.9,
3.8,
6.4,
2
],
[
6.4,
2.8,
5.6,
2.2
],
[
6.3,
2.8,
5.1,
1.5
],
[
6.1,
2.6,
5.6,
1.4
],
[
7.7,
3,
6.1,
2.3
],
[
6.3,
3.4,
5.6,
2.4
],
[
6.4,
3.1,
5.5,
1.8
],
[
6,
3,
4.8,
1.8
],
[
6.9,
3.1,
5.4,
2.1
],
[
6.7,
3.1,
5.6,
2.4
],
[
6.9,
3.1,
5.1,
2.3
],
[
5.8,
2.7,
5.1,
1.9
],
[
6.8,
3.2,
5.9,
2.3
],
[
6.7,
3.3,
5.7,
2.5
],
[
6.7,
3,
5.2,
2.3
],
[
6.3,
2.5,
5,
1.9
],
[
6.5,
3,
5.2,
2
],
[
6.2,
3.4,
5.4,
2.3
],
[
5.9,
3,
5.1,
1.8
]
]
},
"m": {
"Name": "Anderson's Iris data set",
"Description": "The data set consists of 50 Ss from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each S: the length and the width of the sepals and petals, in centimetres.",
"Reference": "R. A. Fisher (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics 7 (2): 179-188."
}
},
config={
"broadcast": True,
"colorBy": "Species",
"graphType": "Scatter2D",
"layoutAdjust": True,
"scatterPlotMatrix": True,
"theme": "CanvasXpress"
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="layout_1.html")
```
```
# Load the libraries that will be used in both examples
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn.impute import SimpleImputer
from sklearn.compose import make_column_transformer
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures # <------ library to perform Polynomial Regression
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder
pd.set_option('display.max_rows', 90)     # set to None to print ALL rows
pd.set_option('display.max_columns', 90)  # set to None to print ALL columns
## 1) EXTRACT THE DATA
# The data can come in different formats; in our case it is in csv format.
# Load the data set
train = pd.read_csv('train.csv') # located in the same folder as the jupyter notebook
test = pd.read_csv('test.csv')   # located in the same folder as the jupyter notebook
print(train.shape)
print(test.shape)
train
```
# Dropping some columns
Let's drop the columns that have more than 50% of their values missing in train.
```
col_plus_50percent_null = train.isnull().sum()[train.isnull().sum()>train.shape[0]/2]
col_plus_50percent_null
```
Note that almost the same columns show up in test as well.
```
test.isnull().sum()[test.isnull().sum()>test.shape[0]/2]
```
So we are left with:
```
features_drop = ['PoolQC','MiscFeature','Alley','Fence']
train = train.drop(features_drop, axis=1)
test = test.drop(features_drop, axis=1)
```
Let's check that we no longer have those variables.
```
col_plus_50percent_null = train.isnull().sum()[train.isnull().sum()>train.shape[0]/2]
col_plus_50percent_null
test.isnull().sum()[test.isnull().sum()>test.shape[0]/2]
```
# Splitting the variables
Let's split the variables into `X_train`, `X_test`, `y_train`, `y_test`, and choose which columns are numerical, ordinal and nominal.
```
numerical = train.select_dtypes(include=np.number).columns.tolist()
numerical.remove('Id')
numerical.remove('SalePrice')
nominal = train.select_dtypes(exclude=np.number).columns.tolist()
# ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
# "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
# "KitchenQual", "Functional", "GarageCond", "PavedDrive"]
ordinal = []
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_REAL_test = test[nominal + ordinal + numerical]
```
# Auxiliary pipelines
To keep the preprocessing of our data better organized, we use three auxiliary pipelines.
```
# Pipeline for ordinal data
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
# Pipeline for nominal data
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
# Pipeline for numerical data
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# Combining the three pipelines
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
```
Finally, we combine everything into a single pipeline.
```
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
# ML_model = Lasso(alpha=190)
# ML_model = Ridge(alpha=20)
ML_model = LinearRegression()
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("estimator", ML_model)
])
complete_pipeline
```
# Predictions
```
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score:', r2_score(y_test, y_pred))
p1 = max(max(y_pred), max(y_test))
p2 = min(min(y_pred), min(y_test))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.scatter(y_test,y_pred)
```
# Generating the submission file for Kaggle
```
y_REAL_test = complete_pipeline.predict(X_REAL_test)
pred=pd.DataFrame(y_REAL_test)
sub_df=pd.read_csv('sample_submission.csv')
datasets=pd.concat([sub_df['Id'],pred],axis=1)
datasets.columns=['Id','SalePrice']
datasets.to_csv('sample_submission.csv',index=False)
```
The file can be uploaded [here](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/overview/evaluation).
```
# WORKING PIPELINE (Lasso / Ridge)
numerical = train.select_dtypes(include=np.number).columns.tolist()
numerical.remove('Id')
numerical.remove('SalePrice')
nominal = train.select_dtypes(exclude=np.number).columns.tolist()
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
ordinal = []
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
ML_model = Lasso(alpha=1)
ML_model = Ridge(alpha=.1)
# ML_model = LinearRegression()
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
# ("scaler", StandardScaler()), # No mejora la estimación escalando
# ('poly_features', PolynomialFeatures(degree=2)), # empeora con polynomal features
("estimator", ML_model)
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
p1 = max(max(y_pred), max(y_test))
p2 = min(min(y_pred), min(y_test))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.scatter(y_test,y_pred)
```
# ALL IN ONE
```
# WORKING PIPELINE (Lasso / LinearRegression)
numerical = train.select_dtypes(include=np.number).columns.tolist()
numerical.remove('Id')
numerical.remove('SalePrice')
nominal = train.select_dtypes(exclude=np.number).columns.tolist()
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
ordinal = []
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
ML_model = Lasso(alpha=190)
ML_model = LinearRegression()
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
# ("scaler", StandardScaler()), # No mejora la estimación escalando
# ('poly_features', PolynomialFeatures(degree=2)),
("estimator", LinearRegression())
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
p1 = max(max(y_pred), max(y_test))
p2 = min(min(y_pred), min(y_test))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.scatter(y_test,y_pred)
# inspect the preprocessed feature matrix (densify it if the transformer returned a sparse matrix)
aux = preprocessing_pipeline.fit_transform(X)
aux = pd.DataFrame(aux.toarray() if hasattr(aux, "toarray") else aux)
preprocessed_features = preprocessing_pipeline.fit_transform(X_train)
preprocessed_features
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
lasso=Lasso()
parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300]}
lasso_regressor=GridSearchCV(lasso,parameters,scoring='neg_mean_squared_error',cv=5)
lasso_regressor.fit(preprocessing_pipeline.fit_transform(X_train),y_train)
print(lasso_regressor.best_params_)
print(lasso_regressor.best_score_)
```
# Finding the Lasso alpha (alpha = 180)
```
parameters={'alpha':[100,150,170,180,190,200,220,250,300]}
ML_model=Lasso()
grid = GridSearchCV(ML_model,parameters,scoring='neg_mean_squared_error',cv=5)
grid.fit(preprocessing_pipeline.fit_transform(X_train),y_train)
# Convert the results of CV into a dataframe
results = pd.DataFrame(grid.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]
results.sort_values('rank_test_score')
```
# Finding the Ridge alpha (alpha = 20)
```
parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300]}
ML_model=Ridge()
grid = GridSearchCV(ML_model,parameters,scoring='neg_mean_squared_error',cv=5)
grid.fit(preprocessing_pipeline.fit_transform(X_train),y_train)
# Convert the results of CV into a dataframe
results = pd.DataFrame(grid.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]
results.sort_values('rank_test_score')
```
## Numeric missing values
# One Hot Encoder
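These two headings refer to building blocks that already appear inside the pipelines above. As a brief standalone illustration (a minimal sketch on a toy frame, not part of the original notebook), numeric missing values can be imputed and a nominal column one-hot encoded like this:
```
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder

toy = pd.DataFrame({"LotFrontage": [65.0, np.nan, 80.0],
                    "MSZoning": ["RL", "RM", "RL"]})

# numeric missing values: replace NaN with the column mean
imputed = SimpleImputer(strategy="mean").fit_transform(toy[["LotFrontage"]])

# nominal column: one binary indicator column per category
encoded = OneHotEncoder(handle_unknown="ignore").fit_transform(toy[["MSZoning"]])

print(imputed)            # the NaN becomes the mean of 65.0 and 80.0, i.e. 72.5
print(encoded.toarray())  # two columns, one per category ("RL", "RM")
```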
```
# https://salvatore-raieli.medium.com/a-complete-guide-to-linear-regression-using-gene-expression-data-regularization-f980ba6b11f7
model = Lasso(alpha=180)
model.fit(preprocessing_pipeline.fit_transform(X_train), y_train)
model  # inspect the fitted model
y_pred = complete_pipeline.predict(X_test)
coefs = model.coef_.flatten()
# the model was fit on the transformed matrix, so take the feature names from the fitted preprocessor
names = preprocessing_pipeline.get_feature_names_out()
genes = list(zip(names, coefs))
feature = pd.DataFrame(genes, columns=["genes", "coefs"])
feature0 = feature.loc[(feature!=0).any(axis=1)]
feature0 = feature[(feature != 0).all(1)]
feature0.shape, feature.shape
print(feature0.shape, feature.shape)
coefs =feature0.sort_values(by=['coefs'])
plt.figure(figsize=(20, 15))
g = sns.barplot(x="genes", y="coefs", data=coefs, color= "lightblue")
g.figsize=(16,10)
plt.xticks(rotation=45)
feature0
# WORKING LASSO
X = train[['MSSubClass', 'LotArea', 'OverallQual']]
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model_lasso = Lasso(alpha=0.01)
model_lasso.fit(X_train, y_train)
y_pred= model_lasso.predict(X_test)
print('Predictions with Lasso Regression')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# WORKING LASSO PIPELINE
X = train[['MSSubClass','LotArea','OverallQual','LotFrontage']] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
imp_mean = SimpleImputer(missing_values =np.nan, strategy='mean')
columns_imp_mean = ['LotFrontage']
scaler = StandardScaler()
column_trans = make_column_transformer(
(imp_mean,columns_imp_mean),
remainder = 'passthrough')
ML_model = Lasso(alpha=0.01)
pipe = make_pipeline(column_trans, ML_model)
print(cross_val_score(pipe,X_train,y_train,cv=5))
pipe.fit(X_train,y_train)
y_pred= pipe.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# cross_val_score first makes the cross-validation split and then runs the pipeline,
# so the means used to fill the missing values are computed with respect to each
# cross-validation training fold only
cross_val_score(pipe, X, y, cv=5, scoring='neg_mean_squared_error').mean()
# WORKING PIPELINE (LinearRegression)
nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood",
"Condition1", "BldgType", "RoofStyle",
"Foundation", "CentralAir", "SaleType", "SaleCondition"]
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF",
"TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea",
"OpenPorchSF"]
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("scaler", StandardScaler()), # No mejora la estimación escalando
("estimator", LinearRegression())
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# WORKING PIPELINE (LinearRegression)
nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood",
"Condition1", "BldgType", "RoofStyle",
"Foundation", "CentralAir", "SaleType", "SaleCondition"]
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF",
"TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea",
"OpenPorchSF"]
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
from sklearn.linear_model import LinearRegression
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("estimator", LinearRegression())
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
```
*A general rule of thumb: drop a dummy-encoded column if using a linear-based model, and do not drop it if using a tree-based model*
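As a quick illustration of that rule (a minimal sketch, separate from the pipelines above), scikit-learn's `OneHotEncoder` exposes this choice through its `drop` parameter:
```
from sklearn.preprocessing import OneHotEncoder

colors = [["red"], ["green"], ["blue"], ["green"]]

# keep every dummy column (fine for tree-based models)
keep_all = OneHotEncoder().fit_transform(colors).toarray()

# drop the first dummy of each feature to avoid collinearity in linear models
drop_first = OneHotEncoder(drop="first").fit_transform(colors).toarray()

print(keep_all.shape, drop_first.shape)  # (4, 3) vs (4, 2)
```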
```
true_value = y_test
predicted_value = y_pred
plt.figure(figsize=(10,10))
plt.scatter(true_value, predicted_value, c='crimson')
# plt.yscale('log')
# plt.xscale('log')
p1 = max(max(predicted_value), max(true_value))
p2 = min(min(predicted_value), min(true_value))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.xlabel('True Values', fontsize=15)
plt.ylabel('Predictions', fontsize=15)
plt.axis('equal')
plt.show()
```
The next cell is from [here](https://mahmoudyusof.github.io/general/scikit-learn-pipelines/)
```
# The next cell is from https://mahmoudyusof.github.io/general/scikit-learn-pipelines/
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
## let's create a validation set from the training set
msk = np.random.rand(len(train_df)) < 0.8
val_df = train_df[~msk]
train_df = train_df[msk]
nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood",
"Condition1", "BldgType", "RoofStyle",
"Foundation", "CentralAir", "SaleType", "SaleCondition"]
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF",
"TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea",
"OpenPorchSF"]
train_features = train_df[nominal + ordinal + numerical]
train_label = train_df["SalePrice"]
val_features = val_df[nominal + ordinal + numerical]
val_label = val_df["SalePrice"]
test_features = test_df[nominal + ordinal + numerical]
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
from sklearn.compose import ColumnTransformer
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
from sklearn.linear_model import LinearRegression
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("estimator", LinearRegression())
])
complete_pipeline.fit(train_features, train_label)
# score = complete_pipeline.score(val_features, val_label)
# print(score)
# predictions = complete_pipeline.predict(test_features)
# pipe = make_pipeline(column_trans, ML_model)
# print(cross_val_score(complete_pipeline,X_train,y_train,cv=5))
# pipe.fit(X_train,y_train)
# evaluate on the validation split created for this cell (X_test/y_test above come from a different split)
y_pred = complete_pipeline.predict(val_features)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(val_label, y_pred))
print('MSE:', metrics.mean_squared_error(val_label, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(val_label, y_pred)))
print('r2_score', r2_score(val_label, y_pred))
plt.scatter(val_label, y_pred)
```
|
github_jupyter
|
# Se cargan las librerías que se van a utilizar en ambos ejemplos
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn.impute import SimpleImputer
from sklearn.compose import make_column_transformer
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures # <------ library to perform Polynomial Regression
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder
pd.set_option('display.max_rows', 90) # by default is 10, if change to None print ALL
pd.set_option('display.max_columns', 90) # by default is 10, if change to None print ALL
## 1) EXTRAER DATOS
# Los datos pueden encontrarse en diferentes formatos, en nuestro caso están en formato csv.
# Se carga la base de datos
train = pd.read_csv('train.csv') #Se encuentra en la misma carpeta que el jupyter notebook
test = pd.read_csv('test.csv') #Se encuentra en la misma carpeta que el jupyter notebook
print(train.shape)
print(test.shape)
train
col_plus_50percent_null = train.isnull().sum()[train.isnull().sum()>train.shape[0]/2]
col_plus_50percent_null
test.isnull().sum()[test.isnull().sum()>test.shape[0]/2]
features_drop = ['PoolQC','MiscFeature','Alley','Fence']
train = train.drop(features_drop, axis=1)
test = test.drop(features_drop, axis=1)
col_plus_50percent_null = train.isnull().sum()[train.isnull().sum()>train.shape[0]/2]
col_plus_50percent_null
test.isnull().sum()[test.isnull().sum()>test.shape[0]/2]
numerical = train.select_dtypes(include=np.number).columns.tolist()
numerical.remove('Id')
numerical.remove('SalePrice')
nominal = train.select_dtypes(exclude=np.number).columns.tolist()
# ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
# "ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
# "KitchenQual", "Functional", "GarageCond", "PavedDrive"]
ordinal = []
X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_REAL_test = test[nominal + ordinal + numerical]
# Pipeline datos ordinales
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
# Pipeline datos nominales
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
# Pipeline datos numéricos
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# Pegado de los tres pipelines
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
# ML_model = Lasso(alpha=190)
# ML_model = Ridge(alpha=20)
ML_model = LinearRegression()
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("estimator", ML_model)
])
complete_pipeline
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score:', r2_score(y_test, y_pred))
p1 = max(max(y_pred), max(y_test))
p2 = min(min(y_pred), min(y_test))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.scatter(y_test,y_pred)
y_REAL_test = complete_pipeline.predict(X_REAL_test)
pred=pd.DataFrame(y_REAL_test)
sub_df=pd.read_csv('sample_submission.csv')
datasets=pd.concat([sub_df['Id'],pred],axis=1)
datasets.columns=['Id','SalePrice']
datasets.to_csv('sample_submission.csv',index=False)
# FUNCIONA PIPELINE LASSO WITH
numerical = train.select_dtypes(include=np.number).columns.tolist()
numerical.remove('Id')
numerical.remove('SalePrice')
nominal = train.select_dtypes(exclude=np.number).columns.tolist()
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
ordinal = []
X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
ML_model = Lasso(alpha=1)
ML_model = Ridge(alpha=.1)
# ML_model = LinearRegression()
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
# ("scaler", StandardScaler()), # No mejora la estimación escalando
# ('poly_features', PolynomialFeatures(degree=2)), # empeora con polynomal features
("estimator", ML_model)
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
p1 = max(max(y_pred), max(y_test))
p2 = min(min(y_pred), min(y_test))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.scatter(y_test,y_pred)
# FUNCIONA PIPELINE LASSO WITH
numerical = train.select_dtypes(include=np.number).columns.tolist()
numerical.remove('Id')
numerical.remove('SalePrice')
nominal = train.select_dtypes(exclude=np.number).columns.tolist()
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
ordinal = []
X = train[nominal + ordinal + numerical] #LotFrontage y MasVnrType tiene NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has a the name of the preprocessor
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
ML_model = Lasso(alpha=190)
ML_model = LinearRegression()
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
# ("scaler", StandardScaler()), # No mejora la estimación escalando
# ('poly_features', PolynomialFeatures(degree=2)),
("estimator", LinearRegression())
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
p1 = max(max(y_pred), max(y_test))
p2 = min(min(y_pred), min(y_test))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.scatter(y_test,y_pred)
aux = ct.fit_transform(X)
aux = pd.df(aux)
preprocessed_features = preprocessing_pipeline.fit_transform(X_train)
preprocessed_features
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
lasso=Lasso()
parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300]}
lasso_regressor=GridSearchCV(lasso,parameters,scoring='neg_mean_squared_error',cv=5)
lasso_regressor.fit(preprocessing_pipeline.fit_transform(X_train),y_train)
print(lasso_regressor.best_params_)
print(lasso_regressor.best_score_)
parameters={'alpha':[100,150,170,180,190,200,220,250,300]}
ML_model=Lasso()
grid = GridSearchCV(ML_model,parameters,scoring='neg_mean_squared_error',cv=5)
grid.fit(preprocessing_pipeline.fit_transform(X_train),y_train)
# Convert the results of CV into a dataframe
results = pd.DataFrame(grid.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]
results.sort_values('rank_test_score')
parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300]}
ML_model=Ridge()
grid = GridSearchCV(ML_model,parameters,scoring='neg_mean_squared_error',cv=5)
grid.fit(preprocessing_pipeline.fit_transform(X_train),y_train)
# Convert the results of CV into a dataframe
results = pd.DataFrame(grid.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]
results.sort_values('rank_test_score')
model
# https://salvatore-raieli.medium.com/a-complete-guide-to-linear-regression-using-gene-expression-data-regularization-f980ba6b11f7
model = Lasso(alpha = 180)
model.fit(preprocessing_pipeline.fit_transform(X_train), y_train)
y_pred = complete_pipeline.predict(X_test)
coefs = model.coef_.flatten()
names = X_train.columns
genes = list(zip(names, coefs))
feature =pd.DataFrame(genes, columns = ["genes", "coefs"])
feature0 = feature.loc[(feature!=0).any(axis=1)]
feature0 = feature[(feature != 0).all(1)]
feature0.shape, feature.shape
print(feature0.shape, feature.shape)
coefs =feature0.sort_values(by=['coefs'])
plt.figure(figsize=(20, 15))
g = sns.barplot(x="genes", y="coefs", data=coefs, color= "lightblue")
g.figsize=(16,10)
plt.xticks(rotation=45)
feature0
# FUNCIONA LASSO
X = train[['MSSubClass', 'LotArea', 'OverallQual']]
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model_lasso = Lasso(alpha=0.01)
model_lasso.fit(X_train, y_train)
y_pred= model_lasso.predict(X_test)
print('Predictions with Polynomial Regression')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# LASSO PIPELINE FUNCIONA
X = train[['MSSubClass','LotArea','OverallQual','LotFrontage']]#LotFrontage y MasVnrType tiene NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
imp_mean = SimpleImputer(missing_values =np.nan, strategy='mean')
columns_imp_mean = ['LotFrontage']
scaler = StandardScaler()
column_trans = make_column_transformer(
(imp_mean,columns_imp_mean),
remainder = 'passthrough')
ML_model = Lasso(alpha=0.01)
pipe = make_pipeline(column_trans, ML_model)
print(cross_val_score(pipe,X_train,y_train,cv=5))
pipe.fit(X_train,y_train)
y_pred= pipe.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# The cross-validation split happens first and the pipeline is then fitted inside each fold.
# The practical difference is that the means used to impute missing values are computed
# only from the training portion of each fold, so nothing leaks from the held-out fold.
# scoring must be a regression metric here, since SalePrice is a continuous target
cross_val_score(pipe,X,y,cv=5,scoring='neg_mean_squared_error').mean()
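# A minimal illustration of the point above (illustrative only; it reuses X, y and the
# imputer/estimator classes already used in this notebook): imputing before cross-validation
# leaks fold statistics, while an imputer inside the pipeline is refitted on each training fold.
leaky_scores = cross_val_score(Lasso(alpha=0.01),
                               SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(X), y, cv=5)
leak_free_scores = cross_val_score(make_pipeline(SimpleImputer(missing_values=np.nan, strategy='mean'), Lasso(alpha=0.01)),
                                   X, y, cv=5)
print(leaky_scores.mean(), leak_free_scores.mean())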
# WORKING PREPROCESSING PIPELINE (note: the estimator below is LinearRegression, not Lasso)
nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood",
"Condition1", "BldgType", "RoofStyle",
"Foundation", "CentralAir", "SaleType", "SaleCondition"]
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF",
"TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea",
"OpenPorchSF"]
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has the name of the preprocessor,
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("scaler", StandardScaler()), # No mejora la estimación escalando
("estimator", LinearRegression())
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# WORKING PREPROCESSING PIPELINE (note: the estimator below is LinearRegression, not Lasso)
nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood",
"Condition1", "BldgType", "RoofStyle",
"Foundation", "CentralAir", "SaleType", "SaleCondition"]
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF",
"TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea",
"OpenPorchSF"]
X = train[nominal + ordinal + numerical] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has the name of the preprocessor,
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
from sklearn.linear_model import LinearRegression
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("estimator", LinearRegression())
])
complete_pipeline.fit(X_train, y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
true_value = y_test
predicted_value = y_pred
plt.figure(figsize=(10,10))
plt.scatter(true_value, predicted_value, c='crimson')
# plt.yscale('log')
# plt.xscale('log')
p1 = max(max(predicted_value), max(true_value))
p2 = min(min(predicted_value), min(true_value))
plt.plot([p1, p2], [p1, p2], 'b-')
plt.xlabel('True Values', fontsize=15)
plt.ylabel('Predictions', fontsize=15)
plt.axis('equal')
plt.show()
# The next cell is from https://mahmoudyusof.github.io/general/scikit-learn-pipelines/
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
## let's create a validation set from the training set
msk = np.random.rand(len(train_df)) < 0.8
val_df = train_df[~msk]
train_df = train_df[msk]
nominal = ["MSZoning", "LotShape", "LandContour", "LotConfig", "Neighborhood",
"Condition1", "BldgType", "RoofStyle",
"Foundation", "CentralAir", "SaleType", "SaleCondition"]
ordinal = ["LandSlope", "OverallQual", "OverallCond", "YearRemodAdd",
"ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "BsmtExposure",
"KitchenQual", "Functional", "GarageCond", "PavedDrive"]
numerical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtUnfSF",
"TotalBsmtSF", "1stFlrSF", "2ndFlrSF", "GrLivArea", "GarageArea",
"OpenPorchSF"]
train_features = train_df[nominal + ordinal + numerical]
train_label = train_df["SalePrice"]
val_features = val_df[nominal + ordinal + numerical]
val_label = val_df["SalePrice"]
test_features = test_df[nominal + ordinal + numerical]
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder
ordinal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OrdinalEncoder())
])
nominal_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="most_frequent")),
("encoder", OneHotEncoder(sparse=True, handle_unknown="ignore"))
])
numerical_pipeline = Pipeline([
("imputer", SimpleImputer(strategy="mean")),
("scaler", StandardScaler())
])
from sklearn.compose import ColumnTransformer
# here we are going to instantiate a ColumnTransformer object with a list of tuples
# each of which has the name of the preprocessor,
# the transformation pipeline (could be a transformer)
# and the list of column names we wish to transform
preprocessing_pipeline = ColumnTransformer([
("nominal_preprocessor", nominal_pipeline, nominal),
("ordinal_preprocessor", ordinal_pipeline, ordinal),
("numerical_preprocessor", numerical_pipeline, numerical)
])
## If you want to test this pipeline run the following code
# preprocessed_features = preprocessing_pipeline.fit_transform(train_features)
from sklearn.linear_model import LinearRegression
complete_pipeline = Pipeline([
("preprocessor", preprocessing_pipeline),
("estimator", LinearRegression())
])
complete_pipeline.fit(train_features, train_label)
# score = complete_pipeline.score(val_features, val_label)
# print(score)
# predictions = complete_pipeline.predict(test_features)
# pipe = make_pipeline(column_trans, ML_model)
# print(cross_val_score(complete_pipeline,X_train,y_train,cv=5))
# pipe.fit(X_train,y_train)
y_pred = complete_pipeline.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# WORKING LASSO PIPELINE
X = train[['MSSubClass','LotArea','OverallQual','LotFrontage']] # LotFrontage and MasVnrType contain NaNs
y = train['SalePrice']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
imp_mean = SimpleImputer(missing_values =np.nan, strategy='mean')
columns_imp_mean = ['LotFrontage']
scaler = StandardScaler()
column_trans = make_column_transformer(
(imp_mean,columns_imp_mean),
remainder = 'passthrough')
ML_model = Lasso(alpha=0.01)
pipe = make_pipeline(column_trans, ML_model)
print(cross_val_score(pipe,X_train,y_train,cv=5))
pipe.fit(X_train,y_train)
y_pred= pipe.predict(X_test)
print('ERRORS OF PREDICTIONS')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('r2_score', r2_score(y_test, y_pred))
plt.scatter(y_test,y_pred)
# Overall Pipeline
A simple pipeline is put together as follows.
```
%load_ext lab_black
# !pip install lxml
# !pip install beautifulsoup4
# !pip install hgtk
# !pip install jamo
# !pip install Unidecode
# !pip install inflect
# !pip install librosa
# !pip install matplotlib
# !sudo apt-get install libsndfile1
import os
import sys
import json
import requests
import re
import urllib.request
import soundfile as sf
import sys
sys.path.insert(0, "./voice_synthesis/tacotron2")
sys.path.insert(0, "./voice_synthesis/waveglow")
from summarization.inference import get_summarized_text
from voice.preprocessing.kor_transliterator import Transliterator
from voice_synthesis.inference import Synthesizer
from bs4 import BeautifulSoup
class VoiceSummarizer:
""" Simple Voice Summarizer Demo """
def __init__(self, checkpoint_path, n_enc, n_dec):
self.ckpt_kobart = checkpoint_path["kobart"]
self.ckpt_tacotron = checkpoint_path["tacotron"]
self.ckpt_waveglow = checkpoint_path["waveglow"]
self.n_enc = n_enc
self.n_dec = n_dec
assert self.ckpt_kobart, "You should enter KoBART checkpoint path."
assert self.ckpt_tacotron, "You should enter Tacotron checkpoint path."
assert self.ckpt_waveglow, "You should enter Waveglow checkpoint path."
def get_news_content(self, url):
resp = requests.get(url)
soup = BeautifulSoup(resp.text, "lxml")
content = ""
if re.search(r"daum", url) != None:
tag = "div#harmonyContainer p"
elif re.search(r"yna", url) != None:
tag = "div[class=scroller01] p"
elif re.search(r"joins", url) != None:
tag = "div#article_body"
for p in soup.select(tag):
content += p.get_text()
return content
def summarize(self, text: str) -> str:
return get_summarized_text(
self.ckpt_kobart, text, n_enc=self.n_enc, n_dec=self.n_dec
)
def transliterate(self, text: str) -> str:
text = re.sub("[-=+,#/\?:^$.@*\"※~&%ㆍ·!』\\‘|\(\)\[\]\<\>`'…》]", " ", text)
        text = re.sub(r"\s{2,}", " ", text)  # collapse the double spaces left by the substitution above
return Transliterator(text).transliterate()
def synthesize(self, text: str) -> None:
synthesizer = Synthesizer(self.ckpt_tacotron, self.ckpt_waveglow)
audio, sampling_rate = synthesizer.inference(text)
sf.write("./summary.wav", audio, sampling_rate)
def run(self, url):
news = self.get_news_content(url)
summarized = self.summarize(news)
transliterated = self.transliterate(summarized)
self.synthesize(transliterated)
return news, summarized, transliterated
search_url = "https://www.yna.co.kr/view/AKR20210610125900057?section=industry/all"
checkpoint_path = {
# "kobart": "./summarization/checkpoints/version-16/best_model_step_40730_loss_1.8838.pt",
"kobart": "./summarization/checkpoints/version-22/best_model_step_65168_loss_2.2167.pt",
"tacotron": "./voice_synthesis/tacotron2/output/checkpoint_680000",
"waveglow": "./voice_synthesis/waveglow/checkpoints/waveglow_1514000",
}
n_kobart_enc = 6
n_kobart_dec = 3
summarizer = VoiceSummarizer(checkpoint_path, n_kobart_enc, n_kobart_dec)
original, summary, transliterated = summarizer.run(search_url)
original
summary
transliterated
```
This notebook presents the data analysis of the dataset used in this project.
```
from src.recommender import parse_json
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
```
Reading the data from disk
```
reviews = parse_json("./data/australian_user_reviews.json")
user_items = parse_json("./data/australian_users_items.json")
steam_games = parse_json("./data/steam_games.json")
```
We start by taking a look at our users.
We want to know how many items our users usually possess.
```
game_bin = [-1, 0, 25, 50, 100, 500, 8000]
user_item_count = user_items[["user_id", "items_count"]].copy()
user_item_count = user_item_count.sort_values("items_count")
print(user_item_count)
avg_usr_item_amount = user_item_count["items_count"].loc[(user_item_count["items_count"]!=0)].mean()
print("Average items in user inventory", avg_usr_item_amount)
print(f" -> recall@k bounded by k/{avg_usr_item_amount}")
user_item_count["items_count"] = pd.cut(user_item_count["items_count"], game_bin).apply(str)
user_item_count = user_item_count.groupby("items_count").count()
x = user_item_count["user_id"].index.to_numpy()
y = user_item_count["user_id"].to_numpy()
plt.bar(x, y, color="orange")
plt.title("Amount of games in user inventory")
plt.xlabel("Amount of games")
plt.ylabel("Amount of users")
plt.show()
```
Now we check how many reviews our users usually have made.
```
review_bins = [0, 1, 3, 5, 10]
user_review_count = reviews[["user_id", "reviews"]].copy()
user_review_count["reviews_count"] = user_review_count["reviews"].apply(len)
user_review_count = user_review_count.sort_values("reviews_count")
print(user_review_count[["user_id", "reviews_count"]])
user_review_count["reviews_count"] = pd.cut(user_review_count["reviews_count"], review_bins).apply(str)
user_review_count = user_review_count.groupby("reviews_count").count()
x = user_review_count["user_id"].index.to_numpy()
y = user_review_count["user_id"].to_numpy()
plt.bar(x, y, color="orange")
plt.title("Amount of users based on amount of reviews")
plt.xlabel("Amount of reviews")
plt.ylabel("Amount of users")
plt.show()
```
Let's now look at the games. First, we want to find out which games are the most popular.
```
game_popularity = user_items[["user_id", "items"]].copy()
game_popularity = game_popularity.explode("items", ignore_index=True)
game_popularity.dropna(inplace=True)
game_popularity["items"] = game_popularity["items"].apply(lambda x: x["item_name"])
game_popularity = game_popularity.groupby("items").count()
game_popularity = game_popularity.sort_values("user_id", ascending=False)["user_id"]
print(game_popularity)
plt.bar(game_popularity[:10].index, game_popularity[:10].values, color="orange")
plt.tick_params(axis="x", rotation=90)
plt.title("Game popularity (top 10)")
plt.xlabel("Games")
plt.ylabel("Amount of users that own game")
plt.show()
```
Checking the results of a popularity-based recommender
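For reference, the code below scores each user's top-$k$ list against the set $R$ of games they own but have not reviewed; a sketch of the metrics it computes (written from the code, not quoted from elsewhere) is

$$\mathrm{nDCG@}k=\frac{\sum_{i=1}^{k}\mathbf{1}[r_i\in R]\,/\,\log_2(i+1)}{\sum_{i=1}^{\min(k,|R|)}1/\log_2(i+1)},\qquad \mathrm{recall@}k=\frac{|\{r_1,\dots,r_k\}\cap R|}{|R|}$$

where $r_1,\dots,r_k$ are the recommended items in rank order.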
```
# compute top k popularity based recommender
k = 10
ground_truth = user_items[["user_id", "items"]].copy()
ground_truth["items"] = ground_truth["items"].apply(lambda x: [i["item_id"] for i in x])
ground_truth.dropna(inplace=True)
game_review_popularity = reviews[["user_id", "reviews"]].copy()
game_review_popularity["reviews"] = game_review_popularity["reviews"].apply(lambda x: [i["item_id"] for i in x])
game_review_popularity.dropna(inplace=True)
n_game_pop = game_review_popularity["reviews"].explode()
n_game_pop.dropna(inplace=True)
n_game_pop = n_game_pop.value_counts()
game_review_popularity["recommendations"] = game_review_popularity["reviews"].apply(lambda x: [rec for rec in n_game_pop.index if rec not in x][:10])
# print(recommendations)
# drop all rows with no items (nothing to compare against)
ground_truth = ground_truth.merge(game_review_popularity, on=['user_id'])
ground_truth['items'] = ground_truth.apply(lambda row: list(set(row['items']).difference(set(row['reviews']))), axis=1)
ground_truth.drop(ground_truth[~ground_truth['items'].astype(bool)].index, inplace=True)
# compute nDCG@k
ground_truth['nDCG@k'] = ground_truth.apply(lambda row: np.sum([int(rec in row['items'])/(np.log2(i+2)) for i, rec in enumerate(row["recommendations"])]), axis=1)
ground_truth['nDCG@k'] = ground_truth.apply(lambda row: row['nDCG@k']/np.sum([1/(np.log2(i+2)) for i in range(min(len(row["recommendations"]), len(row['items'])))]), axis=1)
# compute recall@k
ground_truth['items'] = ground_truth['items'].apply(set)
ground_truth["recommendations"] = ground_truth["recommendations"].apply(set)
ground_truth['recall@k'] = ground_truth.apply(lambda row: len(row["recommendations"].intersection(row['items']))/len(row['items']), axis=1)
ground_truth['ideal_recall@k'] = ground_truth.apply(lambda row: min(len(row['items']), len(row["recommendations"]))/len(row['items']), axis=1)
print(f"Popularity based top {k} recommender achieves: nDCG@{k}: {ground_truth['nDCG@k'].mean()}, recall@{k}: {ground_truth['recall@k'].mean()}")
print(f"The ideal recall@{k} = {ground_truth['ideal_recall@k'].mean()}, which makes nrecall@{k}: {ground_truth['recall@k'].mean()/ground_truth['ideal_recall@k'].mean()}")
print(f"Total amount of games on steam: {steam_games.shape[0]} with {game_popularity.shape[0]} that are actually owned in our dataset")
```
Analysing the tags in our data
Let's first take a look at which tags are present.
```
game_tags = steam_games[["app_name", "genres", "tags", "specs"]].copy()
game_tags["genres"] = game_tags["genres"].fillna("").apply(set)
genre_set = set()
for s in game_tags.genres.values:
genre_set.update(s)
game_tags["tags"] = game_tags["tags"].fillna("").apply(set)
tags_set = set()
for s in game_tags.tags.values:
tags_set.update(s)
game_tags["specs"] = game_tags["specs"].fillna("").apply(set)
specs_set = set()
for s in game_tags.specs.values:
specs_set.update(s)
all_tags = set.union(genre_set, tags_set, specs_set)
print(genre_set, "\n")
print(tags_set, "\n")
print(specs_set, "\n")
print(all_tags, "\n")
```
Now let's see which are the most popular tags
```
tag_amount = game_tags.apply(lambda x: set.union(x.genres, x.tags), axis=1).explode()
tag_popularity = tag_amount.value_counts()
tag_mean = tag_popularity.mean()
print("Average tag occurrence count", tag_mean)
print(tag_popularity)
plt.bar(tag_popularity[:10].index, tag_popularity[:10].values, color="orange")
plt.tick_params(axis="x", rotation=45)
plt.title("Game tag popularity (top 10)")
plt.xlabel("Game tags")
plt.ylabel("Amount of games with tag")
plt.show()
```
Checking tag frequency per game
```
print("Amount of tags:", len(set.union(tags_set, genre_set)))
game_tags["#tag"] = game_tags.apply(lambda x: len(set.union(x.genres, x.tags)), axis=1)
print(game_tags[["app_name", "#tag"]].sort_values("#tag"))
tag_bin = [0, 1, 3, 5, 10, 25, 40]
game_tags["binned_tags"] = pd.cut(game_tags["#tag"], tag_bin).apply(str)
game_tags = game_tags.groupby("binned_tags").count()
x = game_tags["app_name"].index.to_numpy()
y = game_tags["app_name"].to_numpy()
plt.bar(x, y, color="orange")
plt.title("Amount of games per tag")
plt.xlabel("Amount of tags")
plt.ylabel("Amount of games")
plt.show()
```
# Example using Tensorboard and a simple model
```
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Rescaling, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import TensorBoard
from pathlib import Path
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
```
## Settings
```
batch_size = 1
img_size = 32
epochs = 10
seed = 27
validation_split = 0.2
wd = Path('../..')
dataset_path = Path(wd, 'data/dataset_example')
train_path = Path(dataset_path, "train")
test_path = Path(dataset_path, "test")
model_path = Path(wd, "models/example_cnn.h5")
plots_path = Path(wd, "plots")
log_dir = Path(wd, "logs", datetime.now().strftime("%Y%m%d-%H%M%S"))
class_names = ['amanita', 'boletus']
```
## Load data
```
training_data = image_dataset_from_directory(
directory=train_path,
validation_split=validation_split,
subset='training',
labels='inferred',
class_names=class_names,
label_mode='binary',
batch_size=batch_size,
image_size=(img_size, img_size),
seed=seed,
shuffle=True
)
validation_data = image_dataset_from_directory(
directory=train_path,
validation_split=validation_split,
subset='validation',
labels='inferred',
class_names=class_names,
label_mode='binary',
batch_size=batch_size,
image_size=(img_size, img_size),
seed=seed,
shuffle=True
)
test_data = image_dataset_from_directory(
directory=test_path,
labels='inferred',
class_names=class_names,
label_mode='binary',
batch_size=batch_size,
image_size=(img_size, img_size),
seed=seed,
shuffle=True
)
```
## Define model
```
model = Sequential([
Rescaling(
scale=1./255,
input_shape=(img_size, img_size, 3)
),
Conv2D(
filters=64,
kernel_size=(3, 3),
padding='same',
activation='relu'
),
MaxPooling2D(
pool_size=(2, 2),
strides=(2, 2)
),
Flatten(),
Dense(
units=512,
activation='relu'
),
Dropout(
rate=0.2
),
Dense(
units=1,
activation='sigmoid'
)
])
```
## Compile model
```
model.compile(
optimizer=SGD(),
loss=BinaryCrossentropy(),
metrics=['accuracy']
)
```
## Define Tensorboard callback
```
tensorboard_callback = TensorBoard(
log_dir=log_dir,
histogram_freq=1
)
```
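The logged metrics and histograms can be inspected by pointing TensorBoard at the log directory created above. A minimal sketch (the relative path assumes the notebook runs from its own folder, so `../../logs` matches the `log_dir` parent defined in the settings):
```
%load_ext tensorboard
%tensorboard --logdir ../../logs
```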
## Train model
```
history = model.fit(
training_data,
validation_data=validation_data,
epochs=epochs,
callbacks=[tensorboard_callback]
)
```
## Save model
```
model.save(model_path)
```
## Accuracy and loss plot for training and validation
```
metrics_df = pd.DataFrame(history.history)
metrics_df[["accuracy", "val_accuracy"]].plot()
plt.title('Training and Validation Accuracy')
plt.savefig(Path(plots_path, 'accuracy.png'))
plt.show()
metrics_df[["loss", "val_loss"]].plot()
plt.title('Training and Validation Loss')
plt.savefig(Path(plots_path, 'loss.png'))
plt.show()
```
## Evaluate model
```
model.evaluate(test_data)
```
```
from pathlib import Path
import pandas as pd
import numpy as np
import json
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', None)
def trim_all_columns(df):
"""
Trim whitespace from ends of each value across all series in dataframe
"""
trim_strings = lambda x: x.strip() if isinstance(x, str) else x
return df.applymap(trim_strings)
df_gpa = pd.read_csv("https://github.com/wadefagen/datasets/raw/master/gpa/uiuc-gpa-dataset.csv").rename(columns={"Year":"year","Term":"term","Subject":"subject","Number":"number","Primary Instructor":"instructor"})
df_gpa["instructor"] = df_gpa['instructor'].str.extract(r'(\w+, \w)')
df_gpa['total_students'] = df_gpa['A+'] + df_gpa['A'] + df_gpa['A-'] + df_gpa['B'] + df_gpa['B+'] + df_gpa['B-'] + df_gpa['C+'] + df_gpa['C'] + df_gpa['C-'] + df_gpa['D+'] + df_gpa['D'] + df_gpa['D-'] + df_gpa['F']
df_gpa['gpa'] = (df_gpa['A+'] * 4 + df_gpa['A'] * 4 + df_gpa['A-'] * 3.67 + df_gpa['B'] * 3 + df_gpa['B+'] * 3.33 + df_gpa['B-'] * 2.67 + df_gpa['C+'] * 2.33 + df_gpa['C'] * 2 + df_gpa['C-'] * 1.67 + df_gpa['D+'] * 1.33 + df_gpa['D'] + df_gpa['D-'] * 0.67) / df_gpa['total_students']
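# Grade points used in the expression above (standard 4.0 scale):
# A+/A = 4.0, A- = 3.67, B+ = 3.33, B = 3.0, B- = 2.67,
# C+ = 2.33, C = 2.0, C- = 1.67, D+ = 1.33, D = 1.0, D- = 0.67, F = 0.0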
df_gpa = df_gpa.groupby(["year", "term", "subject", "number", "instructor"], as_index=False).agg({"gpa": "mean", "total_students": "sum", "A+": "sum", "A": "sum", "A-": "sum", "B+": "sum", "B": "sum", "B-": "sum", "C+": "sum", "C": "sum", "C-": "sum", "D+": "sum", "D": "sum", "D-": "sum", "F": "sum"})
df_gpa["course"] = df_gpa["subject"] + " " + df_gpa["number"].astype(str)
df_gpa
terms = [
"../raw/{}-{}.csv".format(year, term)
for year in range(df_gpa["year"].min(), df_gpa["year"].max() + 2)
for term in ["Winter", "Spring", "Summer", "Fall"]
if Path("../raw/{}-{}.csv".format(year, term)).is_file()
]
df_catalogs = pd.concat([pd.read_csv(term) for term in terms], ignore_index=True)
df_catalogs["term"] = pd.Categorical(df_catalogs["term"], ["Fall","Summer","Spring","Winter"], ordered=True)
df_catalogs.sort_values(by=["year", "term", "subject", "number", "crn", "meeting"], ascending=[False, True, True, True, True, True], ignore_index=True, inplace=True)
df_catalogs["course"] = df_catalogs["subject"] + " " + df_catalogs["number"].astype(str)
# Fix typos in descriptions
df_catalogs.loc[df_catalogs["course"]=="HIST 574", "description"]="Immerses students in major works of recent American religious history. Written from multiple disciplinary perspectives and wrestling with the knotty problems in which religion has been interwoven, these books will give the student a solid foundation in American religious history. 4 graduate hours. No professional credit."
df_catalogs.loc[df_catalogs["course"]=="ASST 104", "description"] = "Same as REL 104. See REL 104."
df_catalogs.loc[df_catalogs["course"]=="EPOL 551", "description"] = "Same as EOL 570. See EOL 570."
df_catalogs = trim_all_columns(df_catalogs).merge(df_gpa, how="left", on=["year", "term", "course", "subject", "number", "instructor"])
"""
df_catalogs["sectionId"] = (df_catalogs["year"].astype(str) +
df_catalogs["term"].astype(str) +
df_catalogs["subject"].astype(str) +
df_catalogs["number"].astype(str) +
df_catalogs["crn"].astype(str) +
df_catalogs["section"].astype(str))
df_catalogs["meetingId"] = (df_catalogs["year"].astype(str) +
df_catalogs["term"].astype(str) +
df_catalogs["crn"].astype(str) +
df_catalogs["subject"].astype(str) +
df_catalogs["number"].astype(str) +
df_catalogs["section"].astype(str) +
df_catalogs["meeting"].astype(str))
"""
df_catalogs
df_colleges = (pd.read_json("../Colleges.json", orient="index")
.reset_index()
.rename(columns={"index": "collegeId", 0: "name"}))
df_colleges.to_csv("../neo4j/nodes/college_nodes.csv", index=False)
df_colleges
df_subjects = (df_catalogs[["subject", "subject_name"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"subject": "subjectId", "subject_name": "name"}))
df_subjects.to_csv("../neo4j/nodes/subject_nodes.csv", index=False)
df_subjects
df_courses = (df_catalogs[["course", "number", "name", "description", "credit_hours"]]
.drop_duplicates(["course"], ignore_index=True)
.dropna()
.rename(columns={"course": "courseId", "credit_hours": "creditHours"})
.set_index(["courseId"]))
df_bad_descriptions = df_courses.loc[df_courses["description"].str.extract(r"See\s*([A-Z]{2,4}\s*[0-9]{3})").dropna().index]
df_bad_descriptions["see_course"] = df_bad_descriptions["description"].str.extract(r"See\s*([A-Z]{2,4}\s*[0-9]{3})")[0].values
df_bad_descriptions["better_description"] = df_courses.loc[df_bad_descriptions["see_course"].values, "description"].values
df_courses.loc[df_bad_descriptions.index, "description"] = df_bad_descriptions.apply(lambda row: row["better_description"].replace(row.name, row["see_course"]), axis=1)
df_courses.reset_index(inplace=True)
df_courses.to_csv("../neo4j/nodes/course_nodes.csv", index=False)
df_courses
df_courses["prerequisites"] = df_courses["description"].str.extract("Prerequisite:\s*([^.]*)")
df_courses
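# Illustrative sketch (a hypothetical helper column, not used elsewhere in this notebook):
# pull the referenced course codes out of the free-text prerequisite string;
# Series.str.findall returns the list of matched codes for each row.
df_courses["prereq_courses"] = df_courses["prerequisites"].str.findall(r"[A-Z]{2,4}\s*\d{3}")
df_courses[["prerequisites", "prereq_courses"]].dropna().head()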
df_courses[df_courses["prerequisites"].str.contains("One of", case=False, na=False)]
df_courses[df_courses["prerequisites"].str.contains("both", case=False, na=False)]
df_courses[df_courses["creditHours"].str.contains("\d.\d", case=False, na=False)]
df_courses[df_courses["creditHours"].str.contains("or", case=False, na=False)]
df_courses[df_courses["creditHours"].str.contains("to", case=False, na=False)]
df_courses.set_index("courseId")
df_courses["undergraduate"] = df_courses["description"].str.extract("(\d+\s+)undergraduate hour")
df_courses[df_courses["undergraduate"].str.contains("", case=False, na=False)]
df_courses["graduate"] = df_courses["description"].str.extract("(\d+\s+)graduate hour")
df_courses[df_courses["graduate"].str.contains("", case=False, na=False)]
"""
MATCH (course:Course)
WITH course, apoc.text.regexGroups(course.description, "[.]\s*(\d*\s*?[or|OR|to|TO]*\s*?\d*)\s*undergraduate hour") as undergradMatches, apoc.text.regexGroups(course.description, "[.]\s*(\d*\s*?[or|OR|to|TO]*\s*?\d*)\s*graduate hour") as gradMatches
WITH course,
CASE
WHEN size(undergradMatches) = 0 OR size(undergradMatches[0]) < 2 THEN course.creditHours
ELSE undergradMatches[0][1]
END AS undergradCredits,
CASE
WHEN size(gradMatches) = 0 OR size(gradMatches[0]) < 2 THEN course.creditHours
ELSE gradMatches[0][1]
END AS gradCredits
WITH course, apoc.text.regexGroups(undergradCredits, "\d+([\.]\d+)?") as undergradCreditMatches, apoc.text.regexGroups(gradCredits, "\d+([\.]\d+)?") as gradCreditMatches
WITH course, undergradCreditMatches, gradCreditMatches, undergradCreditMatches[0][0] as undergradMin, gradCreditMatches[0][0] as gradMin
WITH course, undergradMin, gradMin,
CASE
WHEN size(undergradCreditMatches) = 1 THEN undergradMin
WHEN size(undergradCreditMatches) > 1 THEN undergradCreditMatches[1][0]
ELSE 0
END AS undergradMax,
CASE
WHEN size(gradCreditMatches) = 1 THEN gradMin
WHEN size(gradCreditMatches) > 1 THEN gradCreditMatches[1][0]
ELSE 0
END AS gradMax
RETURN course, {
undergraduate: {
max: undergradMax,
min: undergradMin
},
graduate: {
max: gradMax,
min: gradMin
}
} AS result
"""
df_sections = (df_catalogs[["crn", "year", "term", "part_of_term",
"gpa", "A+", "A", "A-", "B+", "B", "B-",
"C+", "C", "C-", "D+", "D", "D-", "F",
"section", "section_info", "section_notes",
"section_attributes", "section_capp_area",
"section_co_request", "section_special_approval"]]
.drop_duplicates(subset=["crn", "year", "term"])
.dropna(subset=["crn", "year", "term"])
.rename(columns={"part_of_term": "partOfTerm",
"section_info": "sectionInfo", "section_notes": "sectionNotes",
"section_attributes": "sectionAttributes", "section_capp_area": "sectionCappArea",
"section_co_request": "sectionCoRequest", "section_special_approval": "sectionSpecialApproval"}))
#df_sections[["A+", "A", "A-", "B+", "B", "B-","C+", "C", "C-", "D+", "D", "D-", "F"]] = df_sections[["A+", "A", "A-", "B+", "B", "B-","C+", "C", "C-", "D+", "D", "D-", "F"]].fillna(0)
df_sections.to_csv("../neo4j/nodes/section_nodes.csv", index=False)
df_sections
df_meetings = (df_catalogs[["crn", "year", "term",
"start_date", "end_date", "start_time",
"end_time", "type", "type_name", "meeting",
"days", "room", "building"]]
.drop_duplicates(subset=["crn", "year", "term", "meeting"])
.dropna(subset=["crn", "year", "term", "type", "meeting"])
.rename(columns={"start_date": "startDate", "end_date": "endDate",
"start_time": "startTime", "end_time": "endTime",
"type": "typeId", "type_name": "name", "course": "courseId"}))
df_meetings = (df_meetings
.set_index(["crn", "year", "term", "meeting"])
.merge((df_catalogs[["crn", "year", "term", "meeting", "instructor"]]
.replace(np.nan, "")
.drop_duplicates()
.dropna()
.groupby(["crn", "year", "term", "meeting"])
.agg({"instructor": ':'.join})),
how="left",
on=["crn", "year", "term", "meeting"])
.reset_index())
df_meetings.to_csv("../neo4j/nodes/meeting_nodes.csv", index=False)
df_meetings
df_sections_meetings = (df_catalogs[["crn", "year", "term", "part_of_term",
"gpa", "A+", "A", "A-", "B+", "B", "B-",
"C+", "C", "C-", "D+", "D", "D-", "F",
"start_date", "end_date", "start_time",
"end_time", "type", "type_name",
"days", "room", "building", "meeting",
"section", "section_info", "section_notes",
"section_attributes", "section_capp_area",
"section_co_request", "section_special_approval"]]
.drop_duplicates()
.dropna(subset=["crn", "section", "year", "term", "part_of_term", "type"])
.rename(columns={"part_of_term": "partOfTerm",
"section_info": "sectionInfo",
"section_notes": "sectionNotes",
"section_attributes": "sectionAttributes",
"section_capp_area": "sectionCappArea",
"section_co_request": "sectionCoRequest",
"section_special_approval": "sectionSpecialApproval",
"start_date": "startDate", "end_date": "endDate",
"start_time": "startTime", "end_time": "endTime",
"type": "typeId", "type_name": "name"}))
df_sections_meetings.to_csv("../neo4j/sections_meetings.csv", index=False)
df_sections_meetings
df_instructors = (df_catalogs[["instructor"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"instructor": "name"}))
df_instructors.to_csv("../neo4j/nodes/instructor_nodes.csv", index=False)
df_instructors
df_gen_eds = (df_catalogs[["gen_ed", "gen_ed_name"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"gen_ed": "genEdId", "gen_ed_name": "name"}))
df_gen_eds.to_csv("../neo4j/nodes/gen_ed_nodes.csv", index=False)
df_gen_eds
df_colleges_to_subjects = (df_catalogs[["college", "subject"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"college": "collegeId", "subject": "subjectId"}))
df_colleges_to_subjects.to_csv("../neo4j/relationships/colleges_to_subjects.csv", index=False)
df_colleges_to_subjects
df_subjects_to_courses = (df_catalogs[["subject", "course"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"subject": "subjectId", "course": "courseId"}))
df_subjects_to_courses.to_csv("../neo4j/relationships/subjects_to_courses.csv", index=False)
df_subjects_to_courses
df_courses_to_sections = (df_catalogs[["course", "crn", "year", "term"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"course": "courseId"}))
df_courses_to_sections.to_csv("../neo4j/relationships/courses_to_sections.csv", index=False)
df_courses_to_sections
df_gen_eds_to_courses = (df_catalogs[["course", "gen_ed"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"course": "courseId", "gen_ed": "genEdId"}))
df_gen_eds_to_courses.to_csv("../neo4j/relationships/gen_eds_to_courses.csv", index=False)
df_gen_eds_to_courses
df_instructors_to_meetings = (df_catalogs[["year", "term", "part_of_term", "crn", "type", "meeting", "instructor"]]
.drop_duplicates(ignore_index=True)
.dropna()
.rename(columns={"part_of_term": "partOfTerm", "type": "typeId"}))
df_instructors_to_meetings.to_csv("../neo4j/relationships/instructors_to_meetings.csv", index=False)
df_instructors_to_meetings
```
```
from __future__ import division, print_function
%matplotlib inline
import sys
sys.path.insert(0,'..') # allow us to format the book
sys.path.insert(0,'../kf_book')
# use same formatting as rest of book so that the plots are
# consistent with that look and feel.
import book_format
#book_format.load_style(directory='..')
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn, random, uniform, seed
import scipy.stats
class ParticleFilter(object):
def __init__(self, N, x_dim, y_dim):
self.particles = np.empty((N, 3)) # x, y, heading
self.N = N
self.x_dim = x_dim
self.y_dim = y_dim
# distribute particles randomly with uniform weight
self.weights = np.empty(N)
self.weights.fill(1./N)
self.particles[:, 0] = uniform(0, x_dim, size=N)
self.particles[:, 1] = uniform(0, y_dim, size=N)
self.particles[:, 2] = uniform(0, 2*np.pi, size=N)
def predict(self, u, std):
""" move according to control input u with noise std"""
self.particles[:, 2] += u[0] + randn(self.N) * std[0]
self.particles[:, 2] %= 2 * np.pi
        # move distance u[1] along each particle's (noisy) heading
        d = u[1] + randn(self.N) * std[1]
        self.particles[:, 0] += np.cos(self.particles[:, 2]) * d
        self.particles[:, 1] += np.sin(self.particles[:, 2]) * d
def weight(self, z, var):
dist = np.sqrt((self.particles[:, 0] - z[0])**2 +
(self.particles[:, 1] - z[1])**2)
# simplification assumes variance is invariant to world projection
n = scipy.stats.norm(0, np.sqrt(var))
prob = n.pdf(dist)
# particles far from a measurement will give us 0.0 for a probability
# due to floating point limits. Once we hit zero we can never recover,
# so add some small nonzero value to all points.
prob += 1.e-12
self.weights += prob
self.weights /= sum(self.weights) # normalize
def neff(self):
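        # effective sample size: 1 / sum(w_i^2); small values indicate weight degeneracy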
return 1. / np.sum(np.square(self.weights))
def resample(self):
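        # simple multinomial resampling: draw N particles in proportion to their weights, then reset the weights to uniform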
p = np.zeros((self.N, 3))
w = np.zeros(self.N)
cumsum = np.cumsum(self.weights)
for i in range(self.N):
index = np.searchsorted(cumsum, random())
p[i] = self.particles[index]
w[i] = self.weights[index]
self.particles = p
self.weights.fill(1.0 / self.N)
def estimate(self):
""" returns mean and variance """
pos = self.particles[:, 0:2]
mu = np.average(pos, weights=self.weights, axis=0)
var = np.average((pos - mu)**2, weights=self.weights, axis=0)
return mu, var
from pf_internal import plot_pf
seed(1234)
N = 3000
pf = ParticleFilter(N, 20, 20)
xs = np.linspace(1, 10, 20)
ys = np.linspace(1, 10, 20)
zxs = xs + randn(20)
zys = ys + randn(20)
def animatepf(i):
if i == 0:
plot_pf(pf, 10, 10, weights=False)
idx = int((i-1) / 3)
x, y = xs[idx], ys[idx]
z = [x + randn()*0.2, y + randn()*0.2]
step = (i % 3) + 1
if step == 2:
pf.predict((0.5, 0.5), (0.2, 0.2))
pf.weight(z=z, var=.6)
plot_pf(pf, 10, 10, weights=False)
plt.title('Step {}: Predict'.format(idx+1))
elif step == 3:
pf.resample()
plot_pf(pf, 10, 10, weights=False)
plt.title('Step {}: Resample'.format(idx+1))
else:
mu, var = pf.estimate()
plot_pf(pf, 10, 10, weights=False)
plt.scatter(mu[0], mu[1], color='g', s=100, label='PF')
plt.scatter(x, y, marker='x', color='r', s=180, lw=3, label='Robot')
plt.title('Step {}: Estimate'.format(idx+1))
#plt.scatter(mu[0], mu[1], color='g', s=100, label="PF")
#plt.scatter([x+1], [x+1], marker='x', color='r', s=180, label="True", lw=3)
plt.legend(scatterpoints=1, loc=2)
plt.tight_layout()
from gif_animate import animate
animate('particle_filter_anim.gif', animatepf,
frames=40, interval=800, figsize=(4, 4))
```
<img src='particle_filter_anim.gif'>
|
github_jupyter
|
| 0.551574 | 0.699396 |
```
## Using the face_recognition library
!pip install face_recognition
import matplotlib.pyplot as plt
from skimage import data, exposure
from skimage.feature import hog
#!pip install opencv-python
import cv2
#!pip install urllib3
import urllib
from google.colab.patches import cv2_imshow
#!wget "https://drive.google.com/file/d/1od6Z2OUsgzB3Pk72BB_FDV9sjGIs1UJY/view?usp=sharing"
#image = io.imread("https://drive.google.com/file/d/1od6Z2OUsgzB3Pk72BB_FDV9sjGIs1UJY/view?usp=sharing")
img1 = cv2.imread("maheshbabu.jpg")
image=cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
fd,hog_image = hog(image,orientations=8,pixels_per_cell=(16,16),cells_per_block=(1,1),visualize = True,multichannel=True)
fig,(ax1,ax2)=plt.subplots(1,2,figsize=(8,4),sharex=True ,sharey= True)
ax1.axis("off")
ax1.imshow(image,cmap=plt.cm.gray)
ax1.set_title("Input")
hog_res=exposure.rescale_intensity(hog_image,in_range=(0,10))
ax2.axis("off")
ax2.imshow(hog_res,cmap=plt.cm.gray)
ax2.set_title("Hist of oriented gradients")
plt.show()
image.shape
import face_recognition
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.patches import Circle
import numpy as np
%matplotlib inline
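# Detect face bounding boxes as (top, right, bottom, left); face_recognition uses a HOG-based detector by default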
face_locations=face_recognition.face_locations(image)
number_of_faces = len(face_locations)
print("found {} faces".format(number_of_faces))
plt.imshow(image)
ax=plt.gca()
for face_location in face_locations:
top,right,bottom,left=face_location
x,y,w,h = left,top,right,bottom
rect=Rectangle((x,y),w-x,h-y,fill=False,color = "yellow")
ax.add_patch(rect)
plt.show()
img1 = cv2.imread("maheshbabu.jpg")
mb=cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
plt.imshow(mb)
img2 = cv2.imread("vd.jpg")
vd=cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
plt.imshow(vd)
img3 = cv2.imread("karthik.jpg")
k=cv2.cvtColor(img3, cv2.COLOR_BGR2RGB)
plt.imshow(k)
img4 = cv2.imread("ran.jpg")
r=cv2.cvtColor(img4, cv2.COLOR_BGR2RGB)
plt.imshow(r)
img5 = cv2.imread("varun3.jpg")
v=cv2.cvtColor(img5, cv2.COLOR_BGR2RGB)
plt.imshow(v)
mb_encoding = face_recognition.face_encodings(mb)[0]
vd_encoding = face_recognition.face_encodings(vd)[0]
k_encoding = face_recognition.face_encodings(k)[0]
r_encoding = face_recognition.face_encodings(r)[0]
v_encoding = face_recognition.face_encodings(v)[0]
known_face_encodings = [
mb_encoding,
vd_encoding,
k_encoding,
r_encoding,
v_encoding
]
image = cv2.imread("vdmb.jpg")
unknown_image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
plt.imshow(unknown_image)
unknown_face_encodings=face_recognition.face_encodings(unknown_image)
from scipy.spatial import distance
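# Compare the unknown encoding to each known encoding by Euclidean distance; 0.6 is the threshold commonly used with these 128-dimensional encodings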
for unknown_face_encoding in unknown_face_encodings:
results=[]
for known_face_encoding in known_face_encodings:
d = distance.euclidean(known_face_encoding,unknown_face_encoding)
results.append(d)
threshold = 0.6
results = np.array(results) <= threshold
name = "unknown"
if results[0]:
name = "mahesh babu"
elif results[1]:
name = "vijay"
elif results[2]:
name = "karthikeya"
elif results[3]:
name = "ranveer"
elif results[4]:
name = "varun"
print(f"{name} is in the pic")
```
|
github_jupyter
|
| 0.363308 | 0.411879 |
# RadarCOVID-Report
## Data Extraction
```
import datetime
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import dataframe_image as dfi
import matplotlib.ticker
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
sns.set()
matplotlib.rcParams['figure.figsize'] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
```
### COVID-19 Cases
```
confirmed_df = pd.read_csv("https://covid19tracking.narrativa.com/csv/confirmed.csv")
radar_covid_countries = {"Spain"}
# radar_covid_regions = { ... }
confirmed_df = confirmed_df[confirmed_df["Country_EN"].isin(radar_covid_countries)]
# confirmed_df = confirmed_df[confirmed_df["Region"].isin(radar_covid_regions)]
# set(confirmed_df.Region.tolist()) == radar_covid_regions
confirmed_country_columns = list(filter(lambda x: x.startswith("Country_"), confirmed_df.columns))
confirmed_regional_columns = confirmed_country_columns + ["Region"]
confirmed_df.drop(columns=confirmed_regional_columns, inplace=True)
confirmed_df = confirmed_df.sum().to_frame()
confirmed_df.tail()
confirmed_df.reset_index(inplace=True)
confirmed_df.columns = ["sample_date_string", "cumulative_cases"]
confirmed_df.sort_values("sample_date_string", inplace=True)
confirmed_df["new_cases"] = confirmed_df.cumulative_cases.diff()
confirmed_df["rolling_mean_new_cases"] = confirmed_df.new_cases.rolling(7).mean()
confirmed_df.tail()
extraction_date_confirmed_df = \
confirmed_df[confirmed_df.sample_date_string == extraction_date]
extraction_previous_date_confirmed_df = \
confirmed_df[confirmed_df.sample_date_string == extraction_previous_date].copy()
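# If there is no confirmed-case row for the extraction date yet, extrapolate one from the previous day's rolling mean of new cases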
if extraction_date_confirmed_df.empty and \
not extraction_previous_date_confirmed_df.empty:
extraction_previous_date_confirmed_df["sample_date_string"] = extraction_date
extraction_previous_date_confirmed_df["new_cases"] = \
extraction_previous_date_confirmed_df.rolling_mean_new_cases
extraction_previous_date_confirmed_df["cumulative_cases"] = \
extraction_previous_date_confirmed_df.new_cases + \
extraction_previous_date_confirmed_df.cumulative_cases
confirmed_df = confirmed_df.append(extraction_previous_date_confirmed_df)
confirmed_df.tail()
confirmed_df[["new_cases", "rolling_mean_new_cases"]].plot()
```
### Extract API TEKs
```
from Modules.RadarCOVID import radar_covid
exposure_keys_df = radar_covid.download_last_radar_covid_exposure_keys(days=14)
exposure_keys_df[[
"sample_date_string", "source_url", "region", "key_data"]].head()
exposure_keys_summary_df = \
exposure_keys_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "tek_count"}, inplace=True)
exposure_keys_summary_df.head()
```
### Dump API TEKs
```
tek_list_df = exposure_keys_df[["sample_date_string", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
"sample_date").tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
"Data/TEKs/Current/RadarCOVID-TEKs.json",
lines=True, orient="records")
tek_list_df.drop(columns=["extraction_date_with_hour"]).to_json(
"Data/TEKs/Daily/RadarCOVID-TEKs-" + extraction_date + ".json",
lines=True, orient="records")
tek_list_df.to_json(
"Data/TEKs/Hourly/RadarCOVID-TEKs-" + extraction_date_with_hour + ".json",
lines=True, orient="records")
tek_list_df.head()
```
### Load TEK Dumps
```
import glob
def load_extracted_teks(mode, limit=None) -> pd.DataFrame:
extracted_teks_df = pd.DataFrame()
paths = list(reversed(sorted(glob.glob(f"Data/TEKs/{mode}/RadarCOVID-TEKs-*.json"))))
if limit:
paths = paths[:limit]
for path in paths:
logging.info(f"Loading TEKs from '{path}'...")
iteration_extracted_teks_df = pd.read_json(path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
return extracted_teks_df
```
### Daily New TEKs
```
daily_extracted_teks_df = load_extracted_teks(mode="Daily", limit=14)
daily_extracted_teks_df.head()
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "new_tek_count",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.head()
new_tek_devices_df = daily_extracted_teks_df.copy()
new_tek_devices_df["new_sample_extraction_date"] = \
pd.to_datetime(new_tek_devices_df.sample_date) + datetime.timedelta(1)
new_tek_devices_df["extraction_date"] = pd.to_datetime(new_tek_devices_df.extraction_date)
new_tek_devices_df = new_tek_devices_df[
new_tek_devices_df.new_sample_extraction_date == new_tek_devices_df.extraction_date]
new_tek_devices_df.head()
new_tek_devices_df.set_index("extraction_date", inplace=True)
new_tek_devices_df = new_tek_devices_df.tek_list.apply(lambda x: len(set(x))).to_frame()
new_tek_devices_df.reset_index(inplace=True)
new_tek_devices_df.rename(columns={
"extraction_date": "sample_date_string",
"tek_list": "new_tek_devices"}, inplace=True)
new_tek_devices_df["sample_date_string"] = new_tek_devices_df.sample_date_string.dt.strftime("%Y-%m-%d")
new_tek_devices_df.head()
```
### Hourly New TEKs
```
hourly_extracted_teks_df = load_extracted_teks(mode="Hourly", limit=24)
hourly_extracted_teks_df.head()
hourly_tek_list_df = hourly_extracted_teks_df.groupby("extraction_date_with_hour").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
hourly_tek_list_df = hourly_tek_list_df.set_index("extraction_date_with_hour").sort_index(ascending=True)
hourly_new_tek_df = hourly_tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
hourly_new_tek_df.rename(columns={
"tek_list": "new_tek_count"}, inplace=True)
hourly_new_tek_df.tail()
hourly_new_tek_devices_df = hourly_extracted_teks_df.copy()
hourly_new_tek_devices_df["new_sample_extraction_date"] = \
pd.to_datetime(hourly_new_tek_devices_df.sample_date) + datetime.timedelta(1)
hourly_new_tek_devices_df["extraction_date"] = pd.to_datetime(hourly_new_tek_devices_df.extraction_date)
hourly_new_tek_devices_df = hourly_new_tek_devices_df[
hourly_new_tek_devices_df.new_sample_extraction_date == hourly_new_tek_devices_df.extraction_date]
hourly_new_tek_devices_df.set_index("extraction_date_with_hour", inplace=True)
hourly_new_tek_devices_df_ = pd.DataFrame()
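# Hourly device counts are cumulative within each extraction day, so a per-day diff() converts them into hourly increments (the first hour keeps its full count)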
for i, chunk_df in hourly_new_tek_devices_df.groupby("extraction_date"):
chunk_df = chunk_df.copy()
chunk_df.sort_index(inplace=True)
chunk_tek_count_df = chunk_df.tek_list.apply(lambda x: len(set(x)))
chunk_df = chunk_tek_count_df.diff().fillna(chunk_tek_count_df).to_frame()
hourly_new_tek_devices_df_ = hourly_new_tek_devices_df_.append(chunk_df)
hourly_new_tek_devices_df = hourly_new_tek_devices_df_
hourly_new_tek_devices_df.reset_index(inplace=True)
hourly_new_tek_devices_df.rename(columns={
"tek_list": "new_tek_devices"}, inplace=True)
hourly_new_tek_devices_df.tail()
hourly_summary_df = hourly_new_tek_df.merge(
hourly_new_tek_devices_df, on=["extraction_date_with_hour"], how="outer")
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df.tail()
```
### Data Merge
```
result_summary_df = exposure_keys_summary_df.merge(new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(new_tek_devices_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(confirmed_df, on=["sample_date_string"], how="left")
result_summary_df.head()
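# Normalize TEK and device counts by the 7-day rolling mean of new cases to get per-case sharing ratios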
result_summary_df["tek_count_per_new_case"] = \
result_summary_df.tek_count / result_summary_df.rolling_mean_new_cases
result_summary_df["new_tek_count_per_new_case"] = \
result_summary_df.new_tek_count / result_summary_df.rolling_mean_new_cases
result_summary_df["new_tek_devices_per_new_case"] = \
result_summary_df.new_tek_devices / result_summary_df.rolling_mean_new_cases
result_summary_df["new_tek_count_per_new_tek_device"] = \
result_summary_df.new_tek_count / result_summary_df.new_tek_devices
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df.set_index("sample_date", inplace=True)
result_summary_df = result_summary_df.sort_index(ascending=False)
```
## Report Results
### Summary Table
```
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[[
"tek_count",
"new_tek_count",
"new_cases",
"rolling_mean_new_cases",
"tek_count_per_new_case",
"new_tek_count_per_new_case",
"new_tek_devices",
"new_tek_devices_per_new_case",
"new_tek_count_per_new_tek_device"]]
result_summary_df
```
### Summary Plots
```
summary_ax_list = result_summary_df[[
"rolling_mean_new_cases",
"tek_count",
"new_tek_count",
"new_tek_devices",
"new_tek_count_per_new_tek_device",
"new_tek_devices_per_new_case"
]].sort_index(ascending=True).plot.bar(
title="Summary", rot=45, subplots=True, figsize=(15, 22))
summary_ax_list[-1].yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
```
### Hourly Summary Plots
```
hourly_summary_ax_list = hourly_summary_df.plot.bar(
title="Last 24h Summary", rot=45, subplots=True)
```
### Publish Results
```
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
media_path = get_temporary_image_path()
dfi.export(df, media_path)
return media_path
summary_plots_image_path = save_temporary_plot_image(ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(df=result_summary_df)
hourly_summary_plots_image_path = save_temporary_plot_image(ax=hourly_summary_ax_list)
```
### Save Results
```
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(report_resources_path_prefix + "Summary-Table.html")
_ = shutil.copyfile(summary_plots_image_path, report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(summary_table_image_path, report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(hourly_summary_plots_image_path, report_resources_path_prefix + "Hourly-Summary-Plots.png")
report_daily_url_pattern = \
"https://github.com/pvieito/RadarCOVID-Report/blob/master/Notebooks/" \
"RadarCOVID-Report/{report_type}/RadarCOVID-Report-{report_date}.ipynb"
report_daily_url = report_daily_url_pattern.format(
report_type="Daily", report_date=extraction_date)
report_hourly_url = report_daily_url_pattern.format(
report_type="Hourly", report_date=extraction_date_with_hour)
```
### Publish on README
```
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
summary_table_html = result_summary_df.to_html()
readme_contents = readme_contents.format(
summary_table_html=summary_table_html,
report_url_with_hour=report_hourly_url,
extraction_date_with_hour=extraction_date_with_hour)
with open("README.md", "w") as f:
f.write(readme_contents)
```
### Publish on Twitter
```
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule":
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
hourly_summary_plots_media = api.media_upload(hourly_summary_plots_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
hourly_summary_plots_media.media_id,
]
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
new_teks = extraction_date_result_summary_df.new_tek_count.sum().astype(int)
new_teks_last_hour = extraction_date_result_hourly_summary_df.new_tek_count.sum().astype(int)
new_devices = extraction_date_result_summary_df.new_tek_devices.sum().astype(int)
new_devices_last_hour = extraction_date_result_hourly_summary_df.new_tek_devices.sum().astype(int)
new_tek_count_per_new_tek_device = \
extraction_date_result_summary_df.new_tek_count_per_new_tek_device.sum()
new_tek_devices_per_new_case = \
extraction_date_result_summary_df.new_tek_devices_per_new_case.sum()
status = textwrap.dedent(f"""
Report Update – {extraction_date_with_hour}
#ExposureNotification #RadarCOVID
Shared Diagnoses Day Summary:
- New TEKs: {new_teks} ({new_teks_last_hour:+d} last hour)
- New Devices: {new_devices} ({new_devices_last_hour:+d} last hour, {new_tek_count_per_new_tek_device:.2} TEKs/device)
- Usage Ratio: {new_tek_devices_per_new_case:.2%} devices/case
Report Link: {report_hourly_url}
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
```
|
github_jupyter
|
| 0.297674 | 0.327211 |
# Tutorial template
Here is some short text about what the tutorial will cover.
_Include this note block at the top of every code page:_
:::{note}
You can run this code directly in your browser by clicking on the rocket logo ( <i class="fas fa-rocket"></i> ) at the top of the page, and clicking 'Binder'. This will open a Jupyter Notebook in a [Binder](https://mybinder.org/) environment which is set up to contain everything you need to run the code. **Don't forget to save a local copy if you make any changes!**
If you prefer, you can download the Jupyter Notebook file to run locally, by clicking the download logo ( <i class="fas fa-download"></i> ) at the top of the page and selecting '.ipynb'.
If you are new to using Jupyter Notebooks, [this guide](https://www.dataquest.io/blog/jupyter-notebook-tutorial/) will help you get started.
:::
## Prerequisites
_All tutorials should have a prerequisites section; just say "none" if none are required.
If the prerequisites exist on the knowledgebase, please include links to them.
If the prerequisites do not exist on the knowledgebase, raise a GitHub Issue to ensure they get added (it's preferable that all prerequisites be available on the knowledgebase, but we will relax this requirement while the site is in alpha)._
None.
## Introduction
_Explain the background and details of the tutorial here._
## Setup
_You can include anything here that has been explained in previous lessons: load data, libraries, etc. Each notebook file should run without additional dependencies, so use this section to ensure all necessary setup is complete._
```
import matplotlib.pyplot as plt
from scipy.io.wavfile import write
import numpy as np
```
_Note that we recommend all tutorials include visual and audio output wherever possible._
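For example, here is a minimal sketch of paired visual and audio output. It relies only on the libraries imported in the Setup section above (repeated in the cell so it runs standalone); the 440 Hz tone, the one-second duration, and the `tone_440hz.wav` filename are arbitrary choices for this template, not required conventions.
```
# Minimal sketch of paired visual and audio output (example values only).
import numpy as np
import matplotlib.pyplot as plt
from scipy.io.wavfile import write

sample_rate = 44100                                  # samples per second
t = np.linspace(0, 1.0, sample_rate, endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440 * t)             # one second of a 440 Hz sine wave

# Visual output: plot the first few milliseconds of the waveform
plt.plot(t[:500], tone[:500])
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.title("440 Hz tone")
plt.show()

# Audio output: write a 16-bit WAV file that can be played back or embedded
write("tone_440hz.wav", sample_rate, (tone * 32767).astype(np.int16))
```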
## Basic example
_The aim with all our tutorials is to introduce a basic working example as early as possible, so that new users can see the value right away. You can then introduce more details as you go on. See the [FDTD tutorial](../fdtd/tutorial1) for an example._
```
# Simple example code
```
## More details
_Once you have introduced the basic example, you can begin to build upon it however you like. Try not to make these sections too long._
Here's some more details and code relating to a specific aspect.
```
# And here is some more code
```
## Embedding code, images, math...
There's lots of information about how to embed code, images, etc. into Jupyter Notebooks in the [Jupyter Books documentation](https://jupyterbook.org/file-types/notebooks.html). MyST markdown is used in both the `.md` and `.ipynb` files throughout the Jupyter Book. For more information about MyST markdown, check out [the MyST guide in Jupyter Book](https://jupyterbook.org/content/myst.html), or see [the MyST markdown documentation](https://myst-parser.readthedocs.io/en/latest/).
The most common things you might want to do are embed images, like so:

Or $add_{math}$ and
$$
math^{blocks}
$$
using LaTeX formatting, like so...
$$
\begin{aligned}
\mbox{mean} la_{tex} \\ \\
math blocks
\end{aligned}
$$
## Summary
_Please include a few summary bullets describing the main take-aways from the tutorial._
* Bullet 1
* Bullet 2
* Bullet 3
|
github_jupyter
|
| 0.225502 | 0.895705 |
<a href="https://colab.research.google.com/github/rselent/DS-Unit-2-Sprint-1-Linear-Models/blob/master/module3-ridge-regression/LS_DS_213_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science
*Unit 2, Sprint 1, Module 3*
---
# Ridge Regression
## Assignment
We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
But not just for condos in Tribeca...
- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.
- [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
- [ ] Do one-hot encoding of categorical features.
- [ ] Do feature selection with `SelectKBest`.
- [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler’s `fit_transform` method with the train set, and the scaler’s `transform` method with the test set). A hedged sketch of these steps appears after this list.
- [ ] Get mean absolute error for the test set.
- [ ] As always, commit your notebook to your fork of the GitHub repo.
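Below is a hedged sketch of how these steps could fit together. It is not the assignment solution: it assumes the cleaned `df` produced by the wrangling cell at the bottom of this notebook, it assumes the sale date column is named `SALE_DATE` after the space-to-underscore renaming, and it uses only `BOROUGH` and `NEIGHBORHOOD` as example features (you would normally add more once you have cleaned them).
```
# A possible shape for the workflow, not the official solution.
# Assumptions: `df` is the cleaned DataFrame from the wrangling cell below,
# and the date column is called SALE_DATE after the renaming step.
import pandas as pd
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error

def ridge_sketch(df, features=('BOROUGH', 'NEIGHBORHOOD'), k=5):
    # 1. Subset: one-family dwellings sold for more than $100k and less than $2M
    subset = df[
        (df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') &
        (df['SALE_PRICE'] > 100_000) &
        (df['SALE_PRICE'] < 2_000_000)
    ].copy()
    subset['SALE_DATE'] = pd.to_datetime(subset['SALE_DATE'])

    # 2. Time-based split: train on January-March 2019, test on April 2019
    train = subset[(subset['SALE_DATE'] >= '2019-01-01') & (subset['SALE_DATE'] < '2019-04-01')]
    test = subset[(subset['SALE_DATE'] >= '2019-04-01') & (subset['SALE_DATE'] < '2019-05-01')]
    y_train, y_test = train['SALE_PRICE'], test['SALE_PRICE']

    # 3. One-hot encode the categorical features, aligning test columns to train
    X_train = pd.get_dummies(train[list(features)])
    X_test = pd.get_dummies(test[list(features)]).reindex(columns=X_train.columns, fill_value=0)

    # 4. Feature selection: keep the k best columns by univariate F-test
    selector = SelectKBest(score_func=f_regression, k=min(k, X_train.shape[1]))
    X_train_k = selector.fit_transform(X_train, y_train)
    X_test_k = selector.transform(X_test)

    # 5. Ridge regression; normalize=True was removed in newer scikit-learn,
    #    where fitting a StandardScaler on the train set does the same job.
    model = Ridge(alpha=1.0, normalize=True)
    model.fit(X_train_k, y_train)
    return mean_absolute_error(y_test, model.predict(X_test_k))
```
The function form just keeps the sketch self-contained; in the notebook you would more likely run these steps as separate cells and inspect the intermediate DataFrames.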
The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
## Stretch Goals
Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.
- [ ] Add your own stretch goal(s) !
- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥
- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).
- [ ] Learn more about feature selection:
- ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
- [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
- [mlxtend](http://rasbt.github.io/mlxtend/) library
- scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
- [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in a more inferential statistical approach to linear regression and feature selection, looking at p-values and 95% confidence intervals for the coefficients.
- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.
- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
```
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import pandas as pd
import pandas_profiling
# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')
# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]
# SALE_PRICE was read as strings.
# Remove symbols, convert to integer
df['SALE_PRICE'] = (
df['SALE_PRICE']
.str.replace('$','')
.str.replace('-','')
.str.replace(',','')
.astype(int)
)
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)
# Reduce cardinality for NEIGHBORHOOD feature
# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
```
|
github_jupyter
|
| 0.335024 | 0.959078 |
# Investigate the Data
Welcome to the cumulative project on clustering algorithms! In this project, we will be investigating the way people think about masculinity by applying the KMeans algorithm to data from <a href="https://fivethirtyeight.com/" target = "_blank">FiveThirtyEight</a>. FiveThirtyEight is a popular website known for their use of statistical analysis in many of their stories.
To begin, take a look at `masculinity-survey.pdf`. FiveThirtyEight and WNYC studios used this survey to get their male readers' thoughts on masculinity. After looking through some of the questions asked, take a look at FiveThirtyEight's article <a href="https://fivethirtyeight.com/features/what-do-men-think-it-means-to-be-a-man/" target = "_blank">What Do Men Think It Means To Be A Man?</a> to see their major takeaways. We're going to try to find more insights using machine learning.
In the code block below, we've loaded `masculinity.csv` into a DataFrame named `survey`. This file contains the raw responses to the masculinity survey. Let's start getting a sense of how this data is structured. Try to answer these questions using your Pandas knowledge:
* What are the names of the columns? How do those columns relate to the questions in the PDF?
* How many rows are there?
* How is a question with multiple parts, like question 7, represented in the DataFrame?
* How many people said they often ask a friend for professional advice? This is the first sub-question in question 7.
To answer that last question, use the `value_counts()` function. For example, `df["col_a"].value_counts()` gives you a nice summary of the values found in `"col_a"` of the DataFrame `df`.
You may also want to print `survey.head()` to get a sense of all of the columns.
```
import pandas as pd
survey = pd.read_csv("masculinity.csv")
print(survey.columns)
print(len(survey))
print(survey["q0007_0001"].value_counts())
print(survey.head())
```
# Mapping the Data
In order for us to start thinking about using the KMeans algorithm with this data, we need to first figure out how to turn these responses into numerical data. Let's once again consider question 7. We can't cluster the data using the phrases `"Often"` or `"Rarely"`, but we can turn those phrases into numbers. For example, we could map the data in the following way:
* `"Often"` -> `4`
* `"Sometimes"` -> `3`
* `"Rarely"` -> `2`
* `"Never, but open to it"` -> `1`
* `"Never, and not open to it"` -> `0`.
Note that it's important that these responses are somewhat linear. `"Often"` is at one end of the spectrum with `"Never, and not open to it"` at the other. The other values fall in sequence between the two. You could perform a similar mapping for the `"educ4"` responses (question 29), but there isn't an obvious linear progression in the `"racethn4"` responses (question 28).
In order to do this transformation, use the `map()` function. `map()` takes a dictionary as a parameter. For example, the following line of code would turn all the `"A"`s into `1`s and all the `"B"`s into `2`s in the column `"col_one"`.
```py
df["col_one"] = df["col_one"].map({"A": 1, "B": 2})
```
We've given you a list of the columns that should be mapped. Loop through the values of the list and map each column using the mapping described above.
Be careful of your spelling! Punctuation and whitespace are important. Take a look at the `value_counts()` of one of these columns to see if the mapping worked.
```
cols_to_map = ["q0007_0001", "q0007_0002", "q0007_0003", "q0007_0004",
"q0007_0005", "q0007_0006", "q0007_0007", "q0007_0008", "q0007_0009",
"q0007_0010", "q0007_0011"]
for col in cols_to_map:
survey[col] = survey[col].map({"Never, and not open to it": 0, "Never, but open to it": 1, "Rarely": 2, "Sometimes": 3, "Often": 4})
print(survey['q0007_0001'].value_counts())
```
# Plotting the Data
We now have 11 different features that we could use in our KMeans algorithm. Before we jump into clustering, let's graph some of these features on a 2D graph. Call `plt.scatter` using `survey["q0007_0001"]` and `survey["q0007_0002"]` as parameters. Include `alpha = 0.1`. We want to include `alpha` because many of the data points will be on top of each other. Adding `alpha` will make the points appear more solid if there are many stacked on top of each other.
Include axis labels on your graph. The x-axis corresponds with the first column you gave the `scatter()` function. So in this case, it corresponds to the question about asking a friend for professional advice.
Does it make sense that there are few points in the top left and bottom right corners of the graph? Why? Try graphing other dimensions against each other. Are there any combinations that give you surprising results?
```
from matplotlib import pyplot as plt
plt.scatter(survey["q0007_0001"], survey["q0007_0002"], alpha = 0.1)
plt.xlabel("Ask a friend for professional advice")
plt.ylabel("Ask a friend for personal advice")
plt.show()
```
# Build the KMeans Model
It's now time to start clustering! There are so many interesting questions we could ask about this data. Let's start by seeing if clusters form based on traditionally masculine concepts.
Take a look at the first four sub-questions in question 7. Those four activities aren't necessarily seen as traditionally masculine. On the other hand, sub-questions 5, 8, and 9 are often seen as very masculine activities. What would happen if we found 2 clusters based on those 7 questions? Would we find clusters that represent traditionally feminine and traditionally masculine people? Let's find out.
We need to first drop all of the rows that contain a `NaN` value in any of the columns we're interested in. Create a new variable named `rows_to_cluster` and set it equal to the result of calling `dropna` on `survey`. `dropna` should have a parameter `subset` equal to a list of the 7 columns we want. If you don't include `subset`, the function will drop all rows that have an `NaN` in *any* column. This would drop almost all the rows in the dataframe!
Create a `KMeans` object named `classifier` where `n_clusters = 2`. Call `classifier`'s `.fit()` method. The parameter of `.fit()` should be the 7 columns we're interested in. For example, the following line of code will fit the model based on the columns `"col_one"` and `"col_two"` of the Dataframe `df`.
```py
classifier.fit(df[["col_one", "col_two"]])
```
Make sure to include only the columns that you want to train on, and make sure to use `rows_to_cluster` rather than `survey` to avoid including those `NaN`s!
After fitting your model, print out the model's `cluster_centers_`.
```
from sklearn.cluster import KMeans
rows_to_cluster = survey.dropna(subset = ["q0007_0001", "q0007_0002", "q0007_0003", "q0007_0004", "q0007_0005", "q0007_0008", "q0007_0009"])
classifier = KMeans(n_clusters = 2)
classifier.fit(rows_to_cluster[["q0007_0001", "q0007_0002", "q0007_0003", "q0007_0004", "q0007_0005", "q0007_0008", "q0007_0009"]])
print(classifier.cluster_centers_)
```
# Separate the Cluster Members
When we look at the two clusters, the first four numbers represent the traditionally feminine activities and the last three represent the traditionally masculine activities. If the data points separated into a feminine cluster and a masculine cluster, we would expect one cluster to have high values for the first four numbers and the other cluster to have high values for the last three numbers.
Instead, the first cluster has a higher value in every feature. Since a higher number means the person was more likely to "often" do something, the clusters seem to represent "people who do things" and "people who don't do things".
We might be able to find out more information about these clusters by looking at the specific members of each cluster. Print `classifier.labels_`. This list shows which cluster every row in the DataFrame corresponds to.
For example, if `classifier.labels_` was `[1, 0, 1]`, then the first row in the DataFrame would be in cluster 1, the second row would be in cluster 0, and the third row would be in cluster 1. A row represents one person's answers to every question in the survey.
Create two new empty lists named `cluster_zero_indices` and `cluster_one_indices`. Loop through `classifier.labels_` and whenever a label is `0` add that index to `cluster_zero_indices`. Do the same whenever a label is a `1`.
Print `cluster_zero_indices`
```
print(classifier.labels_)
cluster_zero_indices = []
cluster_one_indices = []
for i in range(len(classifier.labels_)):
if classifier.labels_[i] == 0:
cluster_zero_indices.append(i)
elif classifier.labels_[i] == 1:
cluster_one_indices.append(i)
print(cluster_one_indices)
```
# Investigate the Cluster Members
Now that we have the indices for each cluster, let's look at some stats about these two clusters. You can get the rows of the DataFrame that correspond to cluster zero by doing the following:
```py
cluster_zero_df = rows_to_cluster.iloc[cluster_zero_indices]
```
Do the same for `cluster_one_df`.
Finally, let's look at some information about these two clusters. Print the `value_counts()` of the `educ4` column of each cluster. What do you notice? Try looking at different columns. For example, are the people in cluster zero significantly older than those in cluster one? You can look at the `age3` column to see.
If you divide the result of `value_counts()` by the size of the cluster, you get the percentage of people in each category rather than the total number. This will make it easier to compare the two clusters.
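As a side note, pandas can compute these proportions directly; `value_counts(normalize=True)` is equivalent to dividing the counts by the length of the DataFrame:
```py
# Proportions instead of raw counts -- same result as value_counts() / len(cluster_zero_df)
cluster_zero_df["educ4"].value_counts(normalize=True)
```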
```
cluster_zero_df = rows_to_cluster.iloc[cluster_zero_indices]
cluster_one_df = rows_to_cluster.iloc[cluster_one_indices]
print(cluster_zero_df['educ4'].value_counts()/len(cluster_zero_df))
print(cluster_one_df['educ4'].value_counts()/len(cluster_one_df))
```
# Explore on Your Own
Great work! You've found that, based on those 7 questions, people don't fall into a "masculine" category or a "feminine" category. Instead, they seem to be divided by their level of education!
Now it's time for you to explore this data on your own. In this project, we've really focused on question 7 and its sub-questions. Take a look at some of the other questions in the survey and try to ask yourself some interesting questions. Here's a list of questions you could dive into:
* Which demographic features have stronger correlations with ideas of masculinity (sexual orientation, age, race, marital status, parenthood)?
* Are certain beliefs or actions linked to more self-described masculine or feminine individuals?
* How do insecurities change as people grow older?
Special thanks to the team at FiveThirtyEight and specifically Dhrumil Mehta for giving us access to the data!
```
# Import list
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
import joblib as jb
os.chdir('..')
import AstroPack as AP
os.chdir('./final_models')
from matplotlib import rc
rc('text', usetex=True)
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFE
from sklearn.metrics import (mean_absolute_error, median_absolute_error, r2_score, max_error,
mean_squared_error,explained_variance_score)
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.pipeline import Pipeline
%matplotlib inline
```
# Getting the data
### Hyperparameter tuning
```
# Get the hyperparameter tuning results
os.chdir('../hyperparameter_tuning/teff')
teff_models = pd.read_csv('rf_teff_tuning.csv')
os.chdir('../logg')
logg_models = pd.read_csv('rf_logg_tuning.csv')
os.chdir('../feh')
feh_models = pd.read_csv('rf_FeH_tuning.csv')
os.chdir('../../final_models')
```
### Stars data (JPLUS + WISE + GAIA + LAMOST)
```
# Create a list of the columns that will be used
column_list = ['TILE_ID', 'NUMBER', 'teff', 'teff_err', 'logg', 'logg_err', 'feh', 'feh_err'] + AP.Filters['JPLUS'] + AP.Filters['WISE'] + AP.Filters['GAIA']
# Import the full dataframe with stars that have JPLUS, WISE, and LAMOST data
os.chdir('../data')
stars_raw = pd.read_csv('STEPEs Input Data.csv', usecols=column_list)
os.chdir('../final_models')
# Drop any row with missing values from the dataframe
stars_raw = stars_raw.dropna()
# Filter the stars according to their parameter errors
stars_raw = stars_raw[stars_raw['teff_err'] <= 300]
stars_raw = stars_raw[stars_raw['logg_err'] <= 0.2]
stars_raw = stars_raw[stars_raw['feh_err'] <= 0.4]
# Convert it into a dataframe with magnitudes and colors, indexed by the TILE ID and NUMBER of the star
stars_raw, stars = AP.AssembleWorkingDF(stars_raw, addWISE=True, addGALEX=False, addGAIA=True, Colors=True, Combinations=False)
stellar_parameters = stars_raw[['teff', 'logg', 'feh']]
```
# Teff predictor
### Model ranking
We first check the results from the hyperparameter optimization
```
# Print the final ranking of models
teff_models[['n_features', 'max_features', 'n_trees', 'min_samples_leaf', 'R2', 'StdR2']].sort_values(by = 'R2', ascending = False).head(5)
```
### Model training
We then choose the best hyperparameter combination (n_features = 60, max_features = 0.25, n_trees = 100 and msl = 1) and train a model using that
```
# Split the full sample into training and test samples
x_train, x_test, y_train, y_test = train_test_split(stars, stellar_parameters, test_size=0.25, random_state=42)
# Get the effective temperatures
y_train_teff = y_train['teff']
y_test_teff = y_test['teff']
# Initialize the optimized feature selector
feature_selector = RFE(estimator=DecisionTreeRegressor(),
n_features_to_select = 60,
verbose = 0, step = 200)
# Initialize the optimized random forest
rf = RandomForestRegressor(n_estimators=100,
max_features=0.25,
min_samples_leaf = 1)
# Create a pipeline with the feature selector and the random forest
rf_teff_pipeline = Pipeline(steps = [('Feature Selector', feature_selector),('Model', rf)])
# Fit the pipeline to the training data
rf_teff_pipeline = rf_teff_pipeline.fit(x_train, y_train_teff.values.reshape(len(y_train_teff)))
# Save the pipeline to a file
jb.dump(rf_teff_pipeline, open('rf_teff_estimator/pipeline.sav', 'wb'), compress = 9)
```
### Model testing
Having trained the model, the next step is to test it
```
# Split the full sample into training and test samples
x_train, x_test, y_train, y_test = train_test_split(stars, stellar_parameters, test_size=0.25, random_state=42)
# Load the pipeline from its file
rf_teff_pipeline = jb.load(open('rf_teff_estimator/pipeline.sav', 'rb'))
# Predict the temperatures for the test sample
teff_predictions_rf = rf_teff_pipeline.predict(x_test)
# Calculate the error metrics and print them to the screen
MAE = mean_absolute_error(y_test_teff, teff_predictions_rf)
RMSE = np.sqrt(mean_squared_error(y_test_teff, teff_predictions_rf))
MaxE = max_error(y_test_teff, teff_predictions_rf)
R2 = r2_score(y_test_teff, teff_predictions_rf)
print('Mean Absolute Error: {:.3f}'.format(MAE))
print('Root Mean Squared Error: {:.3f}'.format(RMSE))
print('Max Error: {:.3f}'.format(MaxE))
print('R2 Score: {:.3f}'.format(R2))
# Plot the prediction and error graphs and save them
teff_test_results = AP.plot_test_graphs(y_test_teff, teff_predictions_rf, r'$\mathbf{T_{eff}}$ (K)',
parameter_range = [3500, 9000], error_range = [-750, 750], color = 'red')
teff_test_results.savefig('rf_teff_estimator/test_results.jpg', dpi = 250)
```
# logg predictor
### Model ranking
Here, we check the 5 best hyperparameter combinations based on the R2 score:
```
# Print the final ranking of models
logg_models[['n_features', 'max_features', 'n_trees', 'min_samples_leaf', 'R2', 'StdR2']].sort_values(by = 'R2', ascending = False).head(5)
```
### Model Training
Here, we choose the best hyperparameter combination (n_features = 45, max_features = 0.25, n_trees = 100 and msl = 1) and train a model using that
```
# Split the full sample into training and test samples
x_train, x_test, y_train, y_test = train_test_split(stars, stellar_parameters, test_size=0.25, random_state=42)
# Get the surface gravities
y_train_logg = y_train['logg']
y_test_logg = y_test['logg']
# Initialize the optimized feature selector
feature_selector = RFE(estimator=DecisionTreeRegressor(),
n_features_to_select = 45,
verbose = 0, step = 200)
# Initialize the optimized random forest
rf = RandomForestRegressor(n_estimators=100,
max_features=0.25,
min_samples_leaf = 1)
# Create a pipeline with the feature selector and the random forest
rf_logg_pipeline = Pipeline(steps = [('Feature Selector', feature_selector),('Model', rf)])
# Fit the pipeline to the training data
rf_logg_pipeline = rf_logg_pipeline.fit(x_train, y_train_logg.values.reshape(len(y_train_logg)))
# Save the pipeline to a file
jb.dump(rf_logg_pipeline, open('rf_logg_estimator/pipeline.sav', 'wb'), compress = 9)
```
### Model Testing
Having trained the model, the next step is to test it
```
# Split the full sample into training and test samples
x_train, x_test, y_train, y_test = train_test_split(stars, stellar_parameters, test_size=0.25, random_state=42)
# Load the pipeline from its file
rf_logg_pipeline = jb.load(open('rf_logg_estimator/pipeline.sav', 'rb'))
# Predict the gravities for the test sample
logg_predictions = rf_logg_pipeline.predict(x_test)
# Calculate the error metrics and print them to the screen
MAE = mean_absolute_error(y_test_logg, logg_predictions)
RMSE = np.sqrt(mean_squared_error(y_test_logg, logg_predictions))
MaxE = max_error(y_test_logg, logg_predictions)
R2 = r2_score(y_test_logg, logg_predictions)
print('Mean Absolute Error: {:.3f}'.format(MAE))
print('Root Mean Squared Error: {:.3f}'.format(RMSE))
print('Max Error: {:.3f}'.format(MaxE))
print('R2 Score: {:.3f}'.format(R2))
# Plot the prediction and error graphs and save them
logg_test_results = AP.plot_test_graphs(y_test_logg, logg_predictions, r'$\mathbf{logg}$',
parameter_range = [0.25, 5.0], error_range = [-1.5, 1.5], color = 'blue')
logg_test_results.savefig('rf_logg_estimator/test_results.jpg', dpi = 250)
```
# FeH predictor
### Model ranking
We first check the results from the hyperparameter optimization
```
feh_models[['n_features', 'max_features', 'n_trees', 'min_samples_leaf', 'R2', 'StdR2']].sort_values(by = 'R2', ascending = False).head(5)
```
### Model training
We then choose the best hyperparameter combination (n_features = 60, max_features = 0.25, n_trees = 100 and msl = 1) and train a model using that
```
# Split the full sample into training and test samples
x_train, x_test, y_train, y_test = train_test_split(stars, stellar_parameters, test_size=0.25, random_state=42)
# Get the metallicities
y_train_feh = y_train['feh']
y_test_feh = y_test['feh']
# Initialize the optimized feature selector
feature_selector = RFE(estimator=DecisionTreeRegressor(),
n_features_to_select = 45,
verbose = 0, step = 200)
# Initialize the optimized random forest
rf = RandomForestRegressor(n_estimators=100,
max_features=0.25,
min_samples_leaf=1)
# Create a pipeline with the feature selector and the random forest
rf_feh_pipeline = Pipeline(steps = [('Feature Selector', feature_selector),('Model', rf)])
# Fit the pipeline to the training data
rf_feh_pipeline = rf_feh_pipeline.fit(x_train, y_train_feh.values.reshape(len(y_train_feh)))
# Save the pipeline to a file
jb.dump(rf_feh_pipeline, open('rf_feh_estimator/pipeline.sav', 'wb'), compress = 9)
```
### Model Testing
Having trained the model, the next step is to test it
```
# Split the full sample into training and test samples
x_train, x_test, y_train, y_test = train_test_split(stars, stellar_parameters, test_size=0.25, random_state=42)
# Load the pipeline from its file
rf_feh_pipeline = jb.load(open('rf_feh_estimator/pipeline.sav', 'rb'))
# Predict the metallicities for the test sample
feh_predictions_rf = rf_feh_pipeline.predict(x_test)
# Calculate the error metrics and print them to the screen
MAE = mean_absolute_error(y_test_feh, feh_predictions_rf)
RMSE = np.sqrt(mean_squared_error(y_test_feh, feh_predictions_rf))
MaxE = max_error(y_test_feh, feh_predictions_rf)
R2 = r2_score(y_test_feh, feh_predictions_rf)
print('Mean Absolute Error: {:.3f}'.format(MAE))
print('Root Mean Squared Error: {:.3f}'.format(RMSE))
print('Max Error: {:.3f}'.format(MaxE))
print('R2 Score: {:.3f}'.format(R2))
# Plot the prediction and error graphs and save them
feh_test_results = AP.plot_test_graphs(y_test_feh, feh_predictions_rf, r'$\mathbf{[Fe/H]}$',
parameter_range = [-2.5, 0.75], error_range = [-1.0, 1.0], color = 'green')
feh_test_results.savefig('rf_feh_estimator/test_results.jpg', dpi = 250)
```
# Solutions for chapter 9 exercises
## Set up
```
# Common libraries
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols
import seaborn as sns
# Chapter-specific libraries
import random # For functions sample() and shuffle()
from sklearn.preprocessing import MinMaxScaler # To rescale numeric variables
from sklearn.preprocessing import OneHotEncoder # To one-hot encode cat. variables
#Loading the data
dat_df = pd.read_csv("Karlan_List_exercises_data.csv")
dat_df.head(5)
# Reformatting categorical variables
#dat_df['group'] = pd.Categorical(dat_df['group'], ordered = True, categories = ['ctrl', 'treat1', 'treat2', 'treat3'])
#dat_df['gender'] = pd.Categorical(dat_df['gender'], ordered = True, categories = ['male', 'female'])
#dat_df['state_pol'] = pd.Categorical(dat_df['state_pol'], ordered = True, categories = ['blue', 'red'])
#dat_df['county_pol'] = pd.Categorical(dat_df['county_pol'], ordered = True, categories = ['blue', 'red'])
```
# Exercise 1 - stratified randomization
## 1) Traditional randomization.
Let’s determine the CI for the difference in donations between two groups with no true differences.
a) Select only the control group (i.e., subjects with no treatment effect) and delete the Group variable from it. Create an ID variable that indexes the rows in your dataset. Create a Taste variable that takes the values "vanilla" and "chocolate" with equal probability.
```
dat_df_ctrl = dat_df.copy().loc[dat_df['group']=='ctrl'].drop('group', axis=1)
dat_df_ctrl.reset_index(inplace=True)
dat_df1 = dat_df_ctrl.copy()
dat_df1['assgnt'] = np.random.uniform(0,1,len(dat_df1))
dat_df1['taste'] = 'vanilla'
dat_df1.loc[dat_df1['assgnt'].between(0, 0.5, inclusive=True),
'taste'] = 'chocolate'
dat_df1 = dat_df1.drop('assgnt', axis=1)
dat_df1.head(5)
```
b) Calculate the 90%-CI for the difference in donation amount between the two taste groups.
```
# Metric function
def metric_fun(dat_df):
model = ols("amount~taste+gender+state_pol+county_pol", data=dat_df)
res = model.fit(disp=0)
coeff = res.params['taste[T.vanilla]']
return coeff
metric_fun(dat_df1)
def boot_CI_fun(dat_df, metric_fun, B = 100, conf_level = 0.9):
#Setting sample size
N = len(dat_df)
coeffs = []
for i in range(B):
sim_data_df = dat_df.sample(n=N, replace = True)
coeff = metric_fun(sim_data_df)
coeffs.append(coeff)
coeffs.sort()
start_idx = round(B * (1 - conf_level) / 2)
end_idx = - round(B * (1 - conf_level) / 2)
confint = [coeffs[start_idx], coeffs[end_idx]]
return(confint)
boot_CI_fun(dat_df1, metric_fun, B = 200)
```
## 2) Stratified randomization.
We’ll repeat the process from question 1, but this time stratify the allocation of subjects between vanilla and chocolate taste. Before doing any math: do you expect the CI to be larger or smaller than in the previous question?
*Because stratification reduces the noise around the true value, we should expect a smaller CI.*
a) Copy and paste the necessary functions from the production code folder (stratification.data.prep and stratified.allocation in R, strat_prep_fun and stratified_assgnt_fun in Python). Assign Taste through stratified randomization on the background variables (Gender, Freq, StatePol, CountyPol, Dormant).
```
def strat_prep_fun(dat_df, id_var):
#Isolating the identification variable
assert id_var in dat_df.columns,\
"the id_var string doesn't match any column name"
dat_out_np = np.array(dat_df.loc[:,id_var].values.tolist())
dat_out_np = np.reshape(dat_out_np, (len(dat_out_np), 1))
dat_df = dat_df.drop([id_var], axis=1)
#Input validation
assert dat_df.select_dtypes(exclude = ['int64', 'float64', 'object', 'category']).empty,\
"please format all data columns to numeric, integer, category or character (for categorical variables)"
## Handling categorical variables
cat_df = dat_df.copy().select_dtypes(include = 'object') #Categorical vars
if not cat_df.empty:
# One-hot encoding all categorical variables
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(cat_df)
cat_np = enc.transform(cat_df).toarray()
dat_out_np = np.concatenate((dat_out_np, cat_np), axis=1)
## Handling numerical variables
num_df = dat_df.copy().select_dtypes(include = ['int64', 'float64']) #Numeric vars
if not num_df.empty:
# Normalizing all numeric variables to [0,1]
scaler = MinMaxScaler()
scaler.fit(num_df)
num_np = scaler.transform(num_df)
dat_out_np = np.concatenate((dat_out_np, num_np), axis=1)
return dat_out_np
def stratified_assgnt_fun(dat_df, id_var, n_groups = 2, group_var_name = "group"):
#Handling situations where the number of rows is not divisible by the number
#of groups. NOTE: I'll try to implement a better solution when I can
remainder = len(dat_df) % n_groups
if remainder != 0:
dat_df = dat_df.head(len(dat_df)-remainder)
#Prepping the data
data_np = strat_prep_fun(dat_df, id_var)
#Isolating the identification variable
dat_ID = data_np[:,0].tolist() # Extract ID for later join
data_np = data_np[:,1:].astype(float)
## Matching algorithm
#Setup
N = len(data_np)
match_len = n_groups - 1 # Number of matches we want to find
#Calculate distance matrix
from scipy.spatial import distance_matrix
d_mat = distance_matrix(data_np, data_np)
np.fill_diagonal(d_mat,N+1)
# Set up variables
rows = [i for i in range(N)]
available = rows.copy()
matches_lst = []
matches_lst_lim = int(N/n_groups)
closest = np.argpartition(d_mat, kth=match_len-1,axis=1)
for n in rows:
if len(matches_lst) == matches_lst_lim: break
if n in available:
for search_lim in range(match_len, N):
closest_matches = closest[n,:search_lim].tolist()
matches = list(set(available) & set(closest_matches))
if len(matches) == match_len:
matches.append(n)
matches_lst.append(matches)
available = [m for m in available if m not in matches]
break
#Handling ties from argpartition
elif len(matches) > match_len:
matches = [x for _, x in sorted(zip(d_mat[n,matches].tolist(), matches))]
matches = matches[0:match_len]
matches.append(n)
matches_lst.append(matches)
available = [m for m in available if m not in matches]
break
else:
closest[n,:] = np.argpartition(d_mat[n,:], kth=search_lim)
#Assigning experimental groups to the matched sets
exp_grps = np.array(list(range(n_groups))*(int(N/n_groups))).reshape((int(N/n_groups),n_groups))
exp_grps = exp_grps.tolist()
for j in exp_grps:
np.random.shuffle(j)
#flattening the two lists
import itertools
exp_grps = list(itertools.chain(*exp_grps))
matches_lst2 = list(itertools.chain(*matches_lst))
exp_grps2 = [x for _,x in sorted(zip(matches_lst2,exp_grps))]
assgnt_df = pd.DataFrame(exp_grps2, columns=[group_var_name])
assgnt_df[group_var_name] = assgnt_df[group_var_name].astype(str)
assgnt_df[id_var] = dat_ID
dat_df = dat_df.merge(assgnt_df, on=id_var, how='inner')
return dat_df
#Isolating the target and grouping variables
dat_df2 = dat_df_ctrl.copy()
dat_df2_outcomes = dat_df2.loc[:, ['index', 'gave', 'amount']]
dat_df2_outcomes.head(5)
# Keeping only the variables used for stratification
dat_df2 = dat_df2.drop(['gave','amount'], axis=1)
stratified_data_df = stratified_assgnt_fun(dat_df2, id_var = 'index', n_groups = 2, group_var_name = "taste")
#Renaming the values of the Taste variable
stratified_data_df.loc[stratified_data_df['taste']=='0','taste'] = 'vanilla'
stratified_data_df.loc[stratified_data_df['taste']=='1','taste'] = 'chocolate'
stratified_data_df.head(5)
#Bringing back together the stratified data and the outcome variables
stratified_data_df['gave'] = dat_df2_outcomes['gave']
stratified_data_df['amount'] = dat_df2_outcomes['amount']
stratified_data_df.head(5)
boot_CI_fun(stratified_data_df, metric_fun, B = 100)
```
# Exercise 2 - advanced
a) Calculate the 90%-CI for the effect of each of the matching ratios on the amount given (compared to the control group).
```
# Metric functions
def metric_fun1(dat_df):
model = ols("amount~group+gender+state_pol+county_pol", data=dat_df)
res = model.fit(disp=0)
coeff = res.params['group[T.treat1]']
return coeff
def metric_fun2(dat_df):
model = ols("amount~group+gender+state_pol+county_pol", data=dat_df)
res = model.fit(disp=0)
coeff = res.params['group[T.treat2]']
return coeff
def metric_fun3(dat_df):
model = ols("amount~group+gender+state_pol+county_pol", data=dat_df)
res = model.fit(disp=0)
coeff = res.params['group[T.treat3]']
return coeff
print("90%-CI for the effect of treatment 1: ", boot_CI_fun(dat_df, metric_fun1, B = 200), "\n")
print("90%-CI for the effect of treatment 2: ", boot_CI_fun(dat_df, metric_fun2, B = 200), "\n")
print("90%-CI for the effect of treatment 3: ", boot_CI_fun(dat_df, metric_fun3, B = 200), "\n")
```
b) Is the effect of the 3:1 matching ratio different at the 90% level from the 2:1 ratio? (Trick question!)
```
# Metric function for difference
def metric_fun_diff(dat_df):
model = ols("amount~group+gender+state_pol+county_pol", data=dat_df)
res = model.fit(disp=0)
coeff2 = res.params['group[T.treat2]']
coeff3 = res.params['group[T.treat3]']
diff = coeff3 - coeff2
return diff
print("difference between effect of treatments 3 and 2: ", metric_fun_diff(dat_df), "\n")
print("90%-CI for difference between effect of treatments 3 and 2: ", boot_CI_fun(dat_df, metric_fun_diff, B = 200), "\n")
```
The "trick" here is that to correctly answer that question, you need to calculate the CI of the difference, you cannot directly compare the two CIs from question a) to each other. As you can see, the difference can be as low as -0.24 and as high as 0.15, which is not the values we would naively get by comparing the bounds of the CIs for the two estimates.
```
import sklearn
import numpy as np
import pandas
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Activation, Dropout
from keras.preprocessing.image import ImageDataGenerator
```
### Try Model from scratch
```
IMAGE_SIZE = 64
IMAGE_SIZE_TUPLE = (IMAGE_SIZE, IMAGE_SIZE)
IMAGE_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)
batch_size = 32
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape = IMAGE_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(GlobalAveragePooling2D())
# model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
# model.add(Dense(64))
# model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(3))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['categorical_accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('data/train',
target_size = IMAGE_SIZE_TUPLE,
batch_size = batch_size)
val_set = test_datagen.flow_from_directory('data/valid',
target_size = IMAGE_SIZE_TUPLE,
batch_size = batch_size)
test_set = test_datagen.flow_from_directory('data/test',
target_size = IMAGE_SIZE_TUPLE,
batch_size = batch_size)
history = model.fit_generator(training_set,
steps_per_epoch=2000 // batch_size,
epochs = 1,
validation_data = val_set,
validation_steps=800 // batch_size)
```
### Try Model from pre-trained network on ImageNet
```
model.evaluate_generator(test_set,steps=100)
```
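The heading above refers to a model built from a network pre-trained on ImageNet, but the cell only evaluates the scratch model. As a rough sketch of what that approach might look like (the choice of VGG16 and the frozen-base setup here are assumptions, not something specified in this notebook):
```python
# Sketch of a transfer-learning variant (assumed backbone: VGG16 pre-trained on ImageNet)
from keras.applications import VGG16
from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D, Dense

# Convolutional base pre-trained on ImageNet, without the classifier head
conv_base = VGG16(weights='imagenet', include_top=False, input_shape=IMAGE_SHAPE)
for layer in conv_base.layers:
    layer.trainable = False  # freeze the pre-trained weights

pretrained_model = Sequential()
pretrained_model.add(conv_base)
pretrained_model.add(GlobalAveragePooling2D())
pretrained_model.add(Dense(3, activation='softmax'))

pretrained_model.compile(loss='categorical_crossentropy',
                         optimizer='rmsprop',
                         metrics=['categorical_accuracy'])

pretrained_model.fit_generator(training_set,
                               steps_per_epoch=2000 // batch_size,
                               epochs=1,
                               validation_data=val_set,
                               validation_steps=800 // batch_size)
pretrained_model.evaluate_generator(test_set, steps=100)
```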
# Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
* [Pix2Pix](https://affinelayer.com/pixsrv/)
* [CycleGAN](https://github.com/junyanz/CycleGAN)
* [A whole list](https://github.com/wiseodd/generative-models)
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks _as close as possible_ to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.

The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
```
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
```
## Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input `inputs_real` and the generator input `inputs_z`. We'll assign them the appropriate sizes for each of the networks.
>**Exercise:** Finish the `model_inputs` function below. Create the placeholders for `inputs_real` and `inputs_z` using the input sizes `real_dim` and `z_dim` respectively.
```
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, [None, real_dim], name='input_real')
inputs_z = tf.placeholder(tf.float32, [None, z_dim], name='input_z')
return inputs_real, inputs_z
```
## Generator network

Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
#### Variable Scope
Here we need to use `tf.variable_scope` for two reasons. Firstly, we're going to make sure all the variable names start with `generator`. Similarly, we'll prepend `discriminator` to the discriminator variables. This will help out later when we're training the separate networks.
We could just use `tf.name_scope` to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also _sample from it_ as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the `reuse` keyword for `tf.variable_scope` to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use `tf.variable_scope`, you use a `with` statement:
```python
with tf.variable_scope('scope_name', reuse=False):
# code here
```
Here's more from [the TensorFlow documentation](https://www.tensorflow.org/programmers_guide/variable_scope#the_problem) to get another look at using `tf.variable_scope`.
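To make the reuse behavior concrete, here is a toy snippet (the scope and variable names are arbitrary): the first block creates a variable, and the second block fetches that same variable instead of creating a new one.
```python
with tf.variable_scope('demo', reuse=False):
    w = tf.get_variable('w', shape=[5, 5])       # variable 'demo/w' is created here
with tf.variable_scope('demo', reuse=True):
    w_same = tf.get_variable('w', shape=[5, 5])  # the existing 'demo/w' is returned, not a new one
print(w.name, w_same.name)  # both print demo/w:0
```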
#### Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can just take the outputs from a linear fully connected layer and pass them to `tf.maximum`. Typically, a parameter `alpha` sets the magnitude of the output for negative values. So, the output for negative input (`x`) values is `alpha*x`, and the output for positive `x` is `x`:
$$
f(x) = \max(\alpha \cdot x, x)
$$
#### Tanh Output
The generator has been found to perform best with a $\tanh$ activation on its output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
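For instance, since the MNIST pixel values loaded above are floats in [0, 1], the rescaling in the training loop could be a one-liner (`batch_images` is a hypothetical name for a batch pulled from `mnist.train`):
```python
# Rescale from [0, 1] to [-1, 1] to match the generator's tanh output range
batch_images = batch_images * 2 - 1
```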
>**Exercise:** Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the `reuse` keyword argument from the function to `tf.variable_scope`.
```
def LeakyReLU(tensor, alpha):
return tf.maximum(alpha * tensor, tensor)
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
    out: the tanh output of the generator
'''
with tf.variable_scope('generator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = LeakyReLU(h1, alpha)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
```
## Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
>**Exercise:** Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the `reuse` keyword argument from the function arguments to `tf.variable_scope`.
```
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse=reuse):
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = LeakyReLU(h1, alpha)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
```
## Hyperparameters
```
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
```
## Build network
Now we're building the network from the functions defined above.
The first step is to get our inputs, `input_real` and `input_z`, from `model_inputs` using the sizes of the input and z.
Then, we'll create the generator, `generator(input_z, input_size)`. This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as `g_model`. So the real data discriminator is `discriminator(input_real)` while the fake discriminator is `discriminator(g_model, reuse=True)`.
>**Exercise:** Build the network from the functions you defined earlier.
```
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, out_dim=input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
```
## Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_loss_real + d_loss_fake`. The losses will be sigmoid cross-entropies, which we can get with `tf.nn.sigmoid_cross_entropy_with_logits`. We'll also wrap that in `tf.reduce_mean` to get the mean for all the images in the batch. So the losses will look something like
```python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
```
For the real image logits, we'll use `d_logits_real` which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter `smooth`. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like `labels = tf.ones_like(tensor) * (1 - smooth)`
The discriminator loss for the fake data is similar. The logits are `d_logits_fake`, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator loss uses `d_logits_fake`, the fake image logits. But now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.
>**Exercise:** Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.
```
# Calculate losses
true_labels_for_reals = tf.ones_like(d_logits_real) * (1 - smooth)
true_labels_for_fakes = tf.zeros_like(d_logits_fake)
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=true_labels_for_reals)
)
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=true_labels_for_fakes)
)
d_loss = d_loss_real + d_loss_fake
# The generator wants to trick the discriminator into producing high logits for fake images,
# so for the generator's purposes we flip the labels.
# Since the generator has no control over the real images fed to the discriminator,
# we omit "flipped labels" that would treat real images as fake
# and focus solely on the flipped labels that treat fake images as real.
flipped_labels_for_fakes = tf.ones_like(d_logits_fake)
# implementation heuristic: don't use label smoothing for the generator labels.
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=flipped_labels_for_fakes)
)
```
## Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use `tf.trainable_variables()`. This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with `generator`. So, we just need to iterate through the list from `tf.trainable_variables()` and keep variables that start with `generator`. Each variable object has an attribute `name` which holds the name of the variable as a string (`var.name == 'weights_0'` for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with `discriminator`.
Then, in the optimizer we pass the variable lists to the `var_list` keyword argument of the `minimize` method. This tells the optimizer to only update the listed variables. Something like `tf.train.AdamOptimizer().minimize(loss, var_list=var_list)` will only train the variables in `var_list`.
>**Exercise:** Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using `AdamOptimizer`, create an optimizer for each network that updates that network's variables separately.
```
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [v for v in t_vars if v.name.startswith('generator')]
d_vars = [v for v in t_vars if v.name.startswith('discriminator')]
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
```
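As an optional sanity check (not part of the exercise), you can print the collected variable names and confirm that each list is non-empty and carries the expected scope prefix:
```python
# Every generator variable name should start with 'generator/',
# and every discriminator variable name with 'discriminator/'.
print([v.name for v in g_vars])
print([v.name for v in d_vars])
```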
## Training
```
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
```
## Training loss
Here we'll check out the training losses for the generator and discriminator.
```
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
```
## Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
```
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
```
These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
```
_ = view_samples(-1, samples)
```
Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
```
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
```
It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures appear out of the noise. It looks like 1, 9, and 8 show up first; then it learns 5 and 3.
## Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
```
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
```
```
!rm -rf output-*/
```
## Test 1: discretize = True, use_ordinal_encoding = False
```
!mkdir -p output-1-parallel
! PYTHONPATH="$PYTHONPATH:../../" \
python -m FIDDLE.run \
--data_fname='./input/data.csv' \
--population_fname='./input/pop.csv' \
--config_fname='./input/config-1-parallel.yaml' \
--output_dir='./output-1-parallel/' \
--T=4 --dt=1.0 \
--theta_1=0.001 --theta_2=0.001 --theta_freq=1 \
--stats_functions 'min' 'max' 'mean'
import numpy as np
import pandas as pd
import json
import sparse
S = sparse.load_npz('output-1-parallel/S_all.npz')
S_names = json.load(open('output-1-parallel/S_all.feature_names.json', 'r'))
S_index = pd.read_csv('output-1-parallel/S.ID.csv').set_index(['ID'])
df_S = pd.DataFrame(S.todense(), columns=S_names, index=S_index.index)
X = sparse.load_npz('output-1-parallel/X_all.npz')
X_names = json.load(open('output-1-parallel/X_all.feature_names.json', 'r'))
X_index = pd.read_csv('output-1-parallel/X.ID,t_range.csv').set_index(['ID', 't_range'])
df_X = pd.DataFrame(X.todense().reshape(-1, X.shape[-1]), columns=X_names, index=X_index.index)
display(df_S)
display(df_X)
```
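As an optional sanity check (not part of the original test), you can print the tensor shapes; with `--T=4 --dt=1.0` as above, the time-dependent tensor `X` should have 4 time bins along its middle axis:
```python
# S is (num examples, num time-invariant features);
# X is (num examples, num time bins, num time-dependent features).
print('S shape:', S.shape)
print('X shape:', X.shape)  # middle dimension should be T/dt = 4
```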
## Test 2: discretize = True, use_ordinal_encoding = True
```
!mkdir -p output-2-parallel
! PYTHONPATH="$PYTHONPATH:../../" \
python -m FIDDLE.run \
--data_fname='./input/data.csv' \
--population_fname='./input/pop.csv' \
--config_fname='./input/config-2-parallel.yaml' \
--output_dir='./output-2-parallel/' \
--T=4 --dt=1.0 \
--theta_1=0.001 --theta_2=0.001 --theta_freq=1 \
--stats_functions 'min' 'max' 'mean'
import numpy as np
import pandas as pd
import json
import sparse
S = sparse.load_npz('output-2-parallel/S_all.npz')
S_names = json.load(open('output-2-parallel/S_all.feature_names.json', 'r'))
S_index = pd.read_csv('output-2-parallel/S.ID.csv').set_index(['ID'])
df_S = pd.DataFrame(S.todense(), columns=S_names, index=S_index.index)
X = sparse.load_npz('output-2-parallel/X_all.npz')
X_names = json.load(open('output-2-parallel/X_all.feature_names.json', 'r'))
X_index = pd.read_csv('output-2-parallel/X.ID,t_range.csv').set_index(['ID', 't_range'])
df_X = pd.DataFrame(X.todense().reshape(-1, X.shape[-1]), columns=X_names, index=X_index.index)
display(df_S)
display(df_X)
```
## Test 3: discretize = False
```
!mkdir -p output-3-parallel
! PYTHONPATH="$PYTHONPATH:../../" \
python -m FIDDLE.run \
--data_fname='./input/data.csv' \
--population_fname='./input/pop.csv' \
--config_fname='./input/config-3-parallel.yaml' \
--output_dir='./output-3-parallel/' \
--T=4 --dt=1.0 \
--theta_1=0.001 --theta_2=0.001 --theta_freq=1 \
--stats_functions 'min' 'max' 'mean'
import numpy as np
import pandas as pd
import json
import sparse
S = sparse.load_npz('output-3-parallel/S_all.npz')
S_names = json.load(open('output-3-parallel/S_all.feature_names.json', 'r'))
S_index = pd.read_csv('output-3-parallel/S.ID.csv').set_index(['ID'])
df_S = pd.DataFrame(S.todense(), columns=S_names, index=S_index.index)
X = sparse.load_npz('output-3-parallel/X_all.npz')
X_names = json.load(open('output-3-parallel/X_all.feature_names.json', 'r'))
X_index = pd.read_csv('output-3-parallel/X.ID,t_range.csv').set_index(['ID', 't_range'])
df_X = pd.DataFrame(X.todense().reshape(-1, X.shape[-1]), columns=X_names, index=X_index.index)
display(df_S)
display(df_X)
```
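To compare the three configurations side by side, a small optional snippet (assuming all three test cells above have been run) prints how many features each run produced:
```python
import json

# Discretization and encoding choices change how many time-invariant (S)
# and time-dependent (X) features FIDDLE emits for each configuration.
for run in ['output-1-parallel', 'output-2-parallel', 'output-3-parallel']:
    with open(run + '/S_all.feature_names.json') as f_s, \
         open(run + '/X_all.feature_names.json') as f_x:
        print(run, '| S features:', len(json.load(f_s)),
              '| X features:', len(json.load(f_x)))
```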
# Manipulate Data with `NDArray`
We'll start by introducing the `NDArray`, MXNet's primary tool for storing and transforming data. If you've worked with `NumPy` before, you'll notice that an NDArray is, by design, similar to NumPy's multi-dimensional array.
## Getting Started
To get started, let's import the `ndarray` package (`nd` for short) from MXNet.
```
import mxnet as mx
from mxnet import nd
```
### Create a 2-D Array
Next, let's see how to create a 2-D array (also called a matrix) with values from two sets of numbers: 1, 2, 3 and 5, 6, 7. The values are passed in as a tuple of tuples of integers.
```
# set ctx=mx.gpu(0) to create one on a GPU
a = nd.array(((1,2,3),(5,6,7)), ctx=mx.cpu())
a
```
### Create a 2-D Array of Ones
We can also create a very simple matrix with the same shape (2 rows by 3 columns), but fill it with 1s.
```
ones = nd.ones((2,3))
ones
```
### Create an Array of Random Values
Often we'll want to create arrays whose values are sampled randomly, for example uniformly between -1 and 1. Here we create a 3-by-3 array filled with such samples.
```
rand_uniform = nd.random.uniform(-1,1,shape=(3,3))
rand_uniform
```
You can also fill an array of a given shape with a given value, such as `2.0`.
```
twos = nd.full((2,3), 2.0)
twos
```
### Inspect an NDArray's Attributes
As with NumPy, the dimensions of each NDArray are available through the `.shape` attribute. We can also query its `.size`, which is equal to the product of the components of the shape. In addition, `.dtype` tells us the data type of the stored values, and `.context` tells us on which device the array is stored.
```
(a.shape, a.size, a.dtype, a.context)
```
## Operations
NDArray supports a large number of standard mathematical operations.
### Element-wise Operations
```
twos = nd.full((2,3), 2.0)
a * twos
twos.exp()
```
### Matrix Multiplications
We can use `nd.dot` together with a matrix's transpose to compute a proper matrix-matrix product:
```
nd.dot(a, a.T)
```
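Since `a` has shape (2, 3), multiplying it by its (3, 2) transpose yields a (2, 2) result, which you can confirm directly:
```python
# The product of a (2, 3) matrix and its (3, 2) transpose is (2, 2).
nd.dot(a, a.T).shape
```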
## Indexing
#### Read a Particular Element
MXNet NDArrays support slicing in all the ridiculous ways you might imagine accessing your data. Here’s an example of reading a particular element, which returns a 1D array with shape `(1,)`.
```
a[1,2]
```
#### Read the Second and Third Columns
```
a[:,1:3]
```
#### Use Negative Indices
```
a[-1]
```
#### Write to a Specific Element
```
a[:,1:3] = 2
a
```
#### Use Multi-dimensional Slicing
```
a[1:2,0:2] = 4
a
```
## Converting between MXNet NDArray and NumPy
Converting MXNet NDArrays to and from NumPy is easy. The converted arrays do not share memory.
```
x = a.asnumpy()
(type(x), x)
nd.array(x)
```
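Because the conversion copies the data rather than sharing memory, modifying the NumPy array leaves the original NDArray untouched. A quick demonstration (assuming the cells above have been run in order):
```python
x[0, 0] = 99.0   # modify the NumPy copy in place
print(a[0, 0])   # the original NDArray keeps its old value
```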
## NDArray on GPUs
```
x_gpu = mx.nd.array(x, ctx=mx.gpu(0))
a_gpu = a.copyto(mx.gpu(0))
print(x_gpu)
print(a_gpu)
```
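The cell above assumes a GPU-enabled build of MXNet and at least one visible GPU; on a CPU-only machine it will raise an error. A guarded sketch that falls back to the CPU could look like this (the `num_gpus` helper is available in recent MXNet releases):
```python
# Pick a GPU context if one is available, otherwise fall back to the CPU.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
a_any = a.copyto(ctx)
print(a_any.context)
```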
<a href="https://colab.research.google.com/github/aubricot/computer_vision_with_eol_images/blob/master/classification_for_image_tagging/rating/inspect_train_results.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Determine confidence threshold for Image Rating Classification Models
---
*Last Updated 26 October 2021*
Choose which trained model and confidence threshold values to use for classifying EOL image ratings. Threshold values should be chosen that maximize coverage and minimize error.
First, choose the best models trained in [rating_train.ipynb](https://colab.research.google.com/github/aubricot/computer_vision_with_eol_images/blob/master/classification_for_image_tagging/rating/rating_train.ipynb). Then, run this notebook.
Run 500 images per class (image ratings 1-5) through the best models chosen in rating_train.ipynb to validate model performance. Plot histograms of true and false predictions per class at binned confidence intervals to find the best performance by class and confidence threshold. (This is helpful because models may not learn all classes equally well.)
***Models were trained in Python 2 and TF 1 in December 2020: MobileNet SSD v2 (Run 18, trained on 'good' and 'bad' classes) was trained for 12 hours to 10 epochs with Batch Size=16, Lr=0.001, Dropout=0.2. Inception v3 was trained for 12 hours to 10 epochs with Batch Size=32 Lr=0.001, Dropout=0 (Run 20, trained on 'good' and 'bad' classes). Inception v3 was trained for 4 hours to 15 epochs with Batch Size=64, Lr=0.1, Dropout=0 (Run 6, trained on numerical rating classes 1-5).***
Notes:
* Change parameters using the form fields on the right (or wherever you see 'TO DO' in the code)
## Installs & Imports
---
```
# Mount google drive to import/export files
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# For working with data
import itertools
import os
import numpy as np
import pandas as pd
# For downloading and displaying images
import matplotlib.pyplot as plt
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
%matplotlib inline
# For measuring inference time
import time
# For image classification and training
import tensorflow as tf
# Define functions
# To read in EOL formatted data files
def read_datafile(fpath, sep="\t", header=0, disp_head=True, lineterminator='\n', encoding='latin1'):
"""
Defaults to tab-separated data files with header in row 0
"""
try:
df = pd.read_csv(fpath, sep=sep, header=header, lineterminator=lineterminator, encoding=encoding)
if disp_head:
print("Data header: \n", df.head())
except FileNotFoundError as e:
raise Exception("File not found: Enter the path to your file in form field and re-run").with_traceback(e.__traceback__)
return df
# List filenames of all images used for training/testing models
def list_train_images(imclasses):
# Get image class bundle filenames
all_filenames = ['image_data/' + imclass + '_download_7k.txt' for imclass in imclasses]
print('Image class bundles used for training/testing models: \n', all_filenames)
# Make combined list all image ratings from bundles
used_images = []
for fn in all_filenames:
df = pd.read_csv(fn, index_col=None, header=1, sep='\n')
df.columns = ['link']
used_images.append(df)
used_images = pd.concat(used_images, axis=0, ignore_index=True)
    print('No. image ratings used for training/testing: {}\n{}'.format(
        len(used_images), used_images.head()))
return used_images
# Remove all images used for training/testing from EOL bundle
def remove_used_images(df, used_images, dataset):
print("Total image ratings available for {}: {}".format(dataset, len(df)))
if 'object_url' in df:
df.rename(columns={'object_url':'obj_url'}, inplace=True)
condition = df['obj_url'].isin(used_images['link'])
df.drop(df[condition].index, inplace = True)
unused_images = df.copy()
print("Total un-used image ratings available for {}: {}".format(dataset, len(unused_images)))
return unused_images
# Make master unused image dataset for ratings and exemplars
def make_master_unused_df(ratings, exemplars):
# Reformat image ratings to match exemplars
df1 = unused_ratings[["obj_with_overall_rating", "obj_url", "overall_rating", "ancestry"]].copy()
df1.rename(columns={"obj_with_overall_rating": "obj_id"}, inplace=True)
print(df1.head())
# Reformat image exemplars to match ratings
df2 = unused_exemplars[["target_id", "obj_url", "ancestry"]].copy()
df2.rename(columns={"object_url":"obj_url", "target_id": "obj_id"}, inplace=True)
df2["overall_rating"] = 5
# Merge ratings and exemplars
unused_images = pd.concat([df1, df2])
print("Master un-used image ratings for validation (ratings + exemplars): {}\n{}".format(len(unused_images), unused_images))
return unused_images
```
## Build validation dataset (Only run once)
---
Build a dataset of image ratings for images not previously seen by the models. This removes image ratings found in the EOL user-generated rating and exemplar files that were used in the 7k training/testing datasets.
```
# Find images with ratings that were not used for training or testing models
# TO DO: Type in the path to your working directory in form field to right
wd = "/content/drive/MyDrive/train/" #@param {type:"string"}
# Set current working directory
cwd = wd + 'pre-processing/'
%cd $cwd
# Get list of images used for 7k training/testing datasets
# TO DO: Make list of image classes used for training
imclasses = ['1', '2', '3', '4', '5'] #@param
used_images = list_train_images(imclasses)
# Remove images already used for training/testing from EOL rating dataset
df = read_datafile("image_data/image_ratings.txt", disp_head=False)
unused_ratings = remove_used_images(df, used_images, "Ratings")
unused_ratings.to_csv('image_data/unused_image_ratings_foreval.txt', sep="\t", index=False, header=True)
# Remove images already used for training/testing from EOL exemplar dataset (used to supplement rating=5)
df = read_datafile("image_data/images_selected_as_exemplar.txt", disp_head=False)
unused_exemplars = remove_used_images(df, used_images, "Exemplars")
unused_exemplars.to_csv('image_data/unused_image_exemplars_foreval.txt', sep="\t", index=False, header=True)
# Make master unused images dataset for ratings and exemplars
unused_images = make_master_unused_df(unused_ratings, unused_exemplars)
unused_images.to_csv('image_data/unused_images_foreval_master.txt', sep="\t", index=False, header=True)
```
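As an optional check (not part of the original workflow), you can confirm that each rating class still has enough unseen images for validation before sampling 500 of each:
```python
# Count the available validation images per rating class (1-5).
print(unused_images['overall_rating'].value_counts().sort_index())
```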
## Run images through each model for classification and validate predictions (Run 1x for each trained model)
---
Selected models from rating_train.ipynb
* Run 20: Inception v3 (trained on 'good' and 'bad' classes)
* Run 18: Mobilenet SSD v2 (trained on 'good' and 'bad' classes)
* Run 06: Inception v3 (trained on numerical rating classes 1-5)
```
# Set parameters
# TO DO: Choose training attempt number to inspect results for
TRAIN_SESS_NUM = "18" #@param ["20", "18", "06"] {allow-input: true}
# Directory to saved models
saved_models_dir = wd + 'saved_models/'
# Set current working directory
cwd = wd + 'inspect_resul/'
%cd $cwd
# Suppress pandas setting with copy warning
pd.options.mode.chained_assignment = None # default='warn'
# Define functions
# Define start and stop indices in EOL bundle for running inference
def set_start_stop(df):
# To test with a tiny subset, use 5 random bundle images
N = len(df)
if test_with_tiny_subset:
start=np.random.choice(a=N, size=1)[0]
stop=start+5
# To run for larger set, use 500 random images
else :
start=np.random.choice(a=N, size=1)[0]
stop=start+500
print("Running inference on images")
return start, stop
# Load saved model from directory
def load_saved_model(saved_models_dir, TRAIN_SESS_NUM, module_selection):
# Load trained model from path
saved_model_path = saved_models_dir + TRAIN_SESS_NUM
model = tf.keras.models.load_model(saved_model_path)
# Get name and image size for model type
handle_base, pixels = module_selection
return model, pixels, handle_base
# Get info about model based on training attempt number
def get_model_info(TRAIN_SESS_NUM):
# Session 18
if int(TRAIN_SESS_NUM) == 18:
module_selection =("mobilenet_v2_1.0_224", 224)
dataset_labels = ['bad', 'good'] # Classes aggregated after attempt 7: 1/2 -> bad, 4/5 -> good
# Session 20
elif int(TRAIN_SESS_NUM) == 20:
module_selection = ("inception_v3", 299)
dataset_labels = ['bad', 'good'] # Classes aggregated after attempt 7: 1/2 -> bad, 4/5 -> good
# Session 6
elif int(TRAIN_SESS_NUM) == 6:
module_selection = ("inception_v3", 299)
dataset_labels = ['1', '2', '3', '4', '5'] # Before aggregating classes
return module_selection, dataset_labels
# Set filename for saving classification results
def set_outpath(true_imclass):
outpath = wd + 'inspect_resul/ratings_' + TRAIN_SESS_NUM + '_' + true_imclass + '.csv'
print("Saving results to: \n", outpath)
return outpath
# Load in image from URL
# Modified from https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/saved_model.ipynb#scrollTo=JhVecdzJTsKE
def image_from_url(url, fn):
file = tf.keras.utils.get_file(fn, url) # Filename doesn't matter
disp_img = tf.keras.preprocessing.image.load_img(file)
image = tf.keras.preprocessing.image.load_img(file, target_size=[pixels, pixels])
image = tf.keras.preprocessing.image.img_to_array(image)
image = tf.keras.applications.mobilenet_v2.preprocess_input(
image[tf.newaxis,...])
return image, disp_img
# Get info from predictions to display on images
def get_predict_info(predictions, url, i, stop, start):
# Get info from predictions
label_num = np.argmax(predictions[0], axis=-1)
conf = predictions[0][label_num]
im_class = dataset_labels[label_num]
# Display progress message after each image
print("Completed for {}, {} of {} files".format(url, i, format(stop-start, '.0f')))
return label_num, conf, im_class
# Record results for confidence thresholds
# Make placeholder lists to fill for each class
def make_placeholders():
filenames = []
confidences = []
true_imclasses = []
det_imclasses = []
ancestries = []
return filenames, confidences, true_imclasses, det_imclasses, ancestries
# Add values for each image to placeholder list
def record_results(fn, conf, true_imclass, det_imclass, ancestry):
filenames.append(fn)
confidences.append(conf)
true_imclasses.append(true_imclass)
det_imclasses.append(str(det_imclass))
ancestries.append(ancestry)
results = [filenames, confidences, true_imclasses, det_imclasses, ancestries]
return results
# Export results
def export_results(results):
results = pd.DataFrame(results)
results = results.transpose()
results.to_csv(outpath, index=False, header=("filename", "confidence",
"true_id", "det_id", "ancestry"))
print("Classification predictions for image class {}: {}".format(
true_imclass, results.head()))
# Run inference
# Test with tiny subset (5 images)?
# TO DO: If yes, check test_with_tiny_subset box
test_with_tiny_subset = True #@param {type: "boolean"}
# Load saved model
module_selection, dataset_labels = get_model_info(TRAIN_SESS_NUM)
model, pixels, handle_base = load_saved_model(saved_models_dir, TRAIN_SESS_NUM, module_selection)
# Run inference for each image class to compare known versus predicted ratings
true_imclasses = ['1', '2', '3', '4', '5']
for true_imclass in true_imclasses:
# Set filename for saving classification results
outpath = set_outpath(true_imclass)
# Make placeholder lists to record values for each image
filenames, confidences, true_imclasses, det_imclasses, ancestries = make_placeholders()
# Load subset of in validation images df for each image class
df = unused_images.copy()
df = df[df.overall_rating==int(true_imclass)]
# Run 500 random EOL bundle images through trained model
start, stop = set_start_stop(df)
for i, row in df.iloc[start:stop].iterrows():
try:
# Read in image from url
url = df['obj_url'][i]
fn = str(i) + '.jpg'
img, disp_img = image_from_url(url, fn)
ancestry = df['ancestry'][i]
# Image classification
start_time = time.time() # Record inference time
predictions = model.predict(img, batch_size=1)
label_num, conf, det_imclass = get_predict_info(predictions, url, i, stop, start)
end_time = time.time()
print("Inference time: {} sec".format(format(end_time-start_time, '.2f')))
# Record results in placeholder lists to inspect results in next step
results = record_results(fn, conf, true_imclass, str(det_imclass), ancestry)
        except:
            # Skip images that fail to download or classify
            pass
# Combine to df and export results
export_results(results)
# Combine model outputs for image rating classes 1-5
# Combine prediction files created in codeblock above
true_imclasses = ['1', '2', '3', '4', '5']
base = 'ratings_' + TRAIN_SESS_NUM + '_'
all_filenames = [base + true_imclass + '.csv' for true_imclass in true_imclasses]
all_predictions = pd.concat([pd.read_csv(f, sep=',', header=0, na_filter = False) for f in all_filenames])
print("Model predictions for Training Attempt {}, {}:".format(TRAIN_SESS_NUM, handle_base))
print("No. Images: {}\n{}".format(len(all_predictions), all_predictions[['filename', 'true_id', 'det_id']].head()))
# Aggregate numerical "true_id" classes into 'bad' and 'good'
# TO DO: Enter class names
c0 = "bad" #@param {type:"string"}
c1 = "good" #@param {type:"string"}
imclasses = [c0, c1]
# All predictions of 1 or 2 become 'bad'
all_predictions.true_id[(all_predictions.true_id==1) | (all_predictions.true_id==2)] = c0
all_predictions.det_id[(all_predictions.det_id==1) | (all_predictions.det_id==2)] = c0
# All predictions of 4 or 5 become 'good'
all_predictions.true_id[(all_predictions.true_id==4) | (all_predictions.true_id==5)] = c1
all_predictions.det_id[(all_predictions.det_id==4) | (all_predictions.det_id==5)] = c1
# Remove all predictions of 3
all_predictions = all_predictions[all_predictions.det_id!=3]
all_predictions = all_predictions[all_predictions.true_id!=3]
print("Numeric image ratings successfully aggregated into {} (1-2) and {} (4-5):\n{}".format(c0, c1, all_predictions[['filename', 'true_id', 'det_id']].head()))
```
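Before plotting histograms, an optional cross-tabulation (not in the original notebook) gives a quick overview of how often each true class was predicted as each class:
```python
# Confusion matrix of true vs. predicted classes after aggregating into 'bad'/'good'.
print(pd.crosstab(all_predictions['true_id'], all_predictions['det_id'],
                  rownames=['true'], colnames=['predicted']))
```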
## Plot prediction error and confidence for each class (Run 1x for each trained model)
---
Use these histograms to choose a confidence threshold that maximizes dataset coverage and minimizes error.
```
# Define functions
# Validate predictions by image class (and optionally, by taxon)
def validate_predictions(df, inspect_by_taxon):
# If inspecting for taxon-specific images only
taxon = None
if inspect_by_taxon:
# TO DO: Type in the taxon you'd like to inspect results for using form field to right
taxon = "" #@param {type:"string"}
df = df.loc[df.ancestry.str.contains(taxon, case=False, na=False)]
print("Inspecting results for {}:\n{}".format(taxon, df.head()))
# Validate predictions
# Check where true ratings and model-determined classes match
df['det'] = (df['true_id'] == df['det_id'])
tru = df.loc[df.det, :] # True ID
fal = df.loc[~df.det, :] # False ID
# Inspect by image class and confidence values
# Check how many true/false predictions are at each confidence value
# Class 0 - 'Bad'
c0t = tru.loc[tru['true_id'] == c0, :] # True dets
c0f = fal.loc[fal['true_id'] == c0, :] # False dets
# Class 1 - 'Good'
c1t = tru.loc[tru['true_id'] == c1, :] # True dets
c1f = fal.loc[fal['true_id'] == c1, :] # False dets
return tru, fal, c0t, c0f, c1t, c1f, taxon
# Plot results by image class
def plot_predict_x_conf(tru, fal, c0t, c0f, c1t, c1f, imclasses=imclasses, thresh=None):
# Plot parameters to make 1 subplot per image class
kwargs = dict(alpha=0.5, bins=15)
fig, axes = plt.subplots(len(imclasses), figsize=(10, 10), constrained_layout=True)
fig.suptitle('Prediction Confidence by Class\n Overall Accuracy: {}'.format(
format((len(tru)/(len(tru)+len(fal))),'.2f')))
# Make subplots
# Class 0 - 'Bad'
# True predictions
axes[0].hist(c0t['confidence'], color='y', label='True Det', **kwargs)
# False predictions
axes[0].hist(c0f['confidence'], color='r', label='False Det', **kwargs)
axes[0].set_title("{} (n={} images)\n Accuracy: {}".format(imclasses[0],
len(c0t+c0f), format((len(c0t)/(len(c0t)+len(c0f))),'.2f')))
axes[0].legend();
# Class 1 - 'Good'
# True predictions
axes[1].hist(c1t['confidence'], color='y', label='True Det', **kwargs)
# False predictions
axes[1].hist(c1f['confidence'], color='r', label='False Det', **kwargs)
axes[1].set_title("{} (n={} images)\n Accuracy: {}".format(imclasses[1],
len(c1t+c1f), format((len(c1t)/(len(c1t)+len(c1f))),'.2f')))
axes[1].legend();
# Add Y-axis labels
for ax in fig.get_axes():
ax.set(ylabel='Freq (# imgs)')
if thresh:
ax.axvline(thresh, color='k', linestyle='dashed', linewidth=1)
return fig
# To save the figure
def save_figure(fig, TRAIN_SESS_NUM=TRAIN_SESS_NUM, taxon=None, handle_base=handle_base):
# Make filename
if taxon: # If for a specific taxon
if 'plant' in taxon:
handle_base = handle_base + '_plantae'
elif 'anim' in taxon:
handle_base = handle_base + '_animalia'
figname = TRAIN_SESS_NUM + '_' + handle_base + '.png'
fig.savefig(figname)
print("Histograms saved to ", figname)
return figname
# Load combined prediction results from above
df = all_predictions.copy()
# Optional: Inspect predictions for taxon-specific images only?
# TO DO: If "yes," check box
inspect_by_taxon = False #@param {type:"boolean"}
# Optional: Draw a threshold line to help choose the optimal balance between maximizing useful data and minimizing error
# TO DO: Set threshold value
thresh = 0 #@param {type:"number"}
# Validate predictions by image class (and optionally, by taxon)
tru, fal, c0t, c0f, c1t, c1f, taxon = validate_predictions(df, inspect_by_taxon)
# Plot results by image class
fig = plot_predict_x_conf(tru, fal, c0t, c0f, c1t, c1f, thresh=thresh)
# Export histograms
figname = save_figure(fig, taxon=taxon)
```
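To complement the histograms, a small hypothetical helper (not part of the notebook) can tabulate accuracy and coverage for a range of candidate confidence thresholds, using the `tru`/`fal` splits computed above:
```python
# Coverage = fraction of images kept at or above the threshold;
# accuracy = fraction of kept images whose predicted class matches the true class.
def threshold_summary(tru, fal, thresholds=np.arange(0.5, 1.0, 0.05)):
    rows = []
    n_total = len(tru) + len(fal)
    for t in thresholds:
        kept_true = tru[tru['confidence'] >= t]
        kept_false = fal[fal['confidence'] >= t]
        n_kept = len(kept_true) + len(kept_false)
        coverage = n_kept / n_total if n_total else 0.0
        accuracy = len(kept_true) / n_kept if n_kept else float('nan')
        rows.append((t, coverage, accuracy))
    return pd.DataFrame(rows, columns=['threshold', 'coverage', 'accuracy'])

threshold_summary(tru, fal)
```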
|
github_jupyter
|
# Mount google drive to import/export files
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# For working with data
import itertools
import os
import numpy as np
import pandas as pd
# For downloading and displaying images
import matplotlib.pyplot as plt
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
%matplotlib inline
# For measuring inference time
import time
# For image classification and training
import tensorflow as tf
# Define functions
# To read in EOL formatted data files
def read_datafile(fpath, sep="\t", header=0, disp_head=True, lineterminator='\n', encoding='latin1'):
"""
Defaults to tab-separated data files with header in row 0
"""
try:
df = pd.read_csv(fpath, sep=sep, header=header, lineterminator=lineterminator, encoding=encoding)
if disp_head:
print("Data header: \n", df.head())
except FileNotFoundError as e:
raise Exception("File not found: Enter the path to your file in form field and re-run").with_traceback(e.__traceback__)
return df
# List filenames of all images used for training/testing models
def list_train_images(imclasses):
# Get image class bundle filenames
all_filenames = ['image_data/' + imclass + '_download_7k.txt' for imclass in imclasses]
print('Image class bundles used for training/testing models: \n', all_filenames)
# Make combined list all image ratings from bundles
used_images = []
for fn in all_filenames:
df = pd.read_csv(fn, index_col=None, header=1, sep='\n')
df.columns = ['link']
used_images.append(df)
used_images = pd.concat(used_images, axis=0, ignore_index=True)
print('No. image ratings used for training/testing: {}'.format(len(used_images),
used_images.head()))
return used_images
# Remove all images used for training/testing from EOL bundle
def remove_used_images(df, used_images, dataset):
print("Total image ratings available for {}: {}".format(dataset, len(df)))
if 'object_url' in df:
df.rename(columns={'object_url':'obj_url'}, inplace=True)
condition = df['obj_url'].isin(used_images['link'])
df.drop(df[condition].index, inplace = True)
unused_images = df.copy()
print("Total un-used image ratings available for {}: {}".format(dataset, len(unused_images)))
return unused_images
# Make master unused image dataset for ratings and exemplars
def make_master_unused_df(ratings, exemplars):
# Reformat image ratings to match exemplars
df1 = unused_ratings[["obj_with_overall_rating", "obj_url", "overall_rating", "ancestry"]].copy()
df1.rename(columns={"obj_with_overall_rating": "obj_id"}, inplace=True)
print(df1.head())
# Reformat image exemplars to match ratings
df2 = unused_exemplars[["target_id", "obj_url", "ancestry"]].copy()
df2.rename(columns={"object_url":"obj_url", "target_id": "obj_id"}, inplace=True)
df2["overall_rating"] = 5
# Merge ratings and exemplars
unused_images = pd.concat([df1, df2])
print("Master un-used image ratings for validation (ratings + exemplars): {}\n{}".format(len(unused_images), unused_images))
return unused_images
# Find images with ratings that were not used for training or testing models
# TO DO: Type in the path to your working directory in form field to right
wd = "/content/drive/MyDrive/train/" #@param {type:"string"}
# Set current working directory
cwd = wd + 'pre-processing/'
%cd $cwd
# Get list of images used for 7k training/testing datasets
# TO DO: Make list of image classes used for training
imclasses = ['1', '2', '3', '4', '5'] #@param
used_images = list_train_images(imclasses)
# Remove images already used for training/testing from EOL rating dataset
df = read_datafile("image_data/image_ratings.txt", disp_head=False)
unused_ratings = remove_used_images(df, used_images, "Ratings")
unused_ratings.to_csv('image_data/unused_image_ratings_foreval.txt', sep="\t", index=False, header=True)
# Remove images already used for training/testing from EOL exemplar dataset (used to supplment rating=5)
df = read_datafile("image_data/images_selected_as_exemplar.txt", disp_head=False)
unused_exemplars = remove_used_images(df, used_images, "Exemplars")
unused_exemplars.to_csv('image_data/unused_image_exemplars_foreval.txt', sep="\t", index=False, header=True)
# Make master unused images dataset for ratings and exemplars
unused_images = make_master_unused_df(unused_ratings, unused_exemplars)
unused_images.to_csv('image_data/unused_images_foreval_master.txt', sep="\t", index=False, header=True)
# Set parameters
# TO DO: Choose training attempt number to inspect results for
TRAIN_SESS_NUM = "18" #@param ["20", "18", "06"] {allow-input: true}
# Directory to saved models
saved_models_dir = wd + 'saved_models/'
# Set current working directory
cwd = wd + 'inspect_resul/'
%cd $cwd
# Suppress pandas setting with copy warning
pd.options.mode.chained_assignment = None # default='warn'
# Define functions
# Define start and stop indices in EOL bundle for running inference
def set_start_stop(df):
# To test with a tiny subset, use 5 random bundle images
N = len(df)
if test_with_tiny_subset:
start=np.random.choice(a=N, size=1)[0]
stop=start+5
# To run for larger set, use 500 random images
else :
start=np.random.choice(a=N, size=1)[0]
stop=start+500
print("Running inference on images")
return start, stop
# Load saved model from directory
def load_saved_model(saved_models_dir, TRAIN_SESS_NUM, module_selection):
# Load trained model from path
saved_model_path = saved_models_dir + TRAIN_SESS_NUM
model = tf.keras.models.load_model(saved_model_path)
# Get name and image size for model type
handle_base, pixels = module_selection
return model, pixels, handle_base
# Get info about model based on training attempt number
def get_model_info(TRAIN_SESS_NUM):
# Session 18
if int(TRAIN_SESS_NUM) == 18:
module_selection =("mobilenet_v2_1.0_224", 224)
dataset_labels = ['bad', 'good'] # Classes aggregated after attempt 7: 1/2 -> bad, 4/5 -> good
# Session 20
elif int(TRAIN_SESS_NUM) == 20:
module_selection = ("inception_v3", 299)
dataset_labels = ['bad', 'good'] # Classes aggregated after attempt 7: 1/2 -> bad, 4/5 -> good
# Session 6
elif int(TRAIN_SESS_NUM) == 6:
module_selection = ("inception_v3", 299)
dataset_labels = ['1', '2', '3', '4', '5'] # Before aggregating classes
return module_selection, dataset_labels
# Set filename for saving classification results
def set_outpath(true_imclass):
outpath = wd + 'inspect_resul/ratings_' + TRAIN_SESS_NUM + '_' + true_imclass + '.csv'
print("Saving results to: \n", outpath)
return outpath
# Load in image from URL
# Modified from https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/saved_model.ipynb#scrollTo=JhVecdzJTsKE
def image_from_url(url, fn):
file = tf.keras.utils.get_file(fn, url) # Filename doesn't matter
disp_img = tf.keras.preprocessing.image.load_img(file)
image = tf.keras.preprocessing.image.load_img(file, target_size=[pixels, pixels])
image = tf.keras.preprocessing.image.img_to_array(image)
image = tf.keras.applications.mobilenet_v2.preprocess_input(
image[tf.newaxis,...])
return image, disp_img
# Get info from predictions to display on images
def get_predict_info(predictions, url, i, stop, start):
# Get info from predictions
label_num = np.argmax(predictions[0], axis=-1)
conf = predictions[0][label_num]
im_class = dataset_labels[label_num]
# Display progress message after each image
print("Completed for {}, {} of {} files".format(url, i, format(stop-start, '.0f')))
return label_num, conf, im_class
# Record results for confidence thresholds
# Make placeholder lists to fill for each class
def make_placeholders():
filenames = []
confidences = []
true_imclasses = []
det_imclasses = []
ancestries = []
return filenames, confidences, true_imclasses, det_imclasses, ancestries
# Add values for each image to placeholder list
def record_results(fn, conf, true_imclass, det_imclass, ancestry):
filenames.append(fn)
confidences.append(conf)
true_imclasses.append(true_imclass)
det_imclasses.append(str(det_imclass))
ancestries.append(ancestry)
results = [filenames, confidences, true_imclasses, det_imclasses, ancestries]
return results
# Export results
def export_results(results):
results = pd.DataFrame(results)
results = results.transpose()
results.to_csv(outpath, index=False, header=("filename", "confidence",
"true_id", "det_id", "ancestry"))
print("Classification predictions for image class {}: {}".format(
true_imclass, results.head()))
# Run inference
# Test with tiny subset (5 images)?
# TO DO: If yes, check test_with_tiny_subset box
test_with_tiny_subset = True #@param {type: "boolean"}
# Load saved model
module_selection, dataset_labels = get_model_info(TRAIN_SESS_NUM)
model, pixels, handle_base = load_saved_model(saved_models_dir, TRAIN_SESS_NUM, module_selection)
# Run inference for each image class to compare known versus predicted ratings
true_imclasses = ['1', '2', '3', '4', '5']
for true_imclass in true_imclasses:
# Set filename for saving classification results
outpath = set_outpath(true_imclass)
# Make placeholder lists to record values for each image
filenames, confidences, true_imclasses, det_imclasses, ancestries = make_placeholders()
# Load subset of in validation images df for each image class
df = unused_images.copy()
df = df[df.overall_rating==int(true_imclass)]
# Run 500 random EOL bundle images through trained model
start, stop = set_start_stop(df)
for i, row in df.iloc[start:stop].iterrows():
try:
# Read in image from url
url = df['obj_url'][i]
fn = str(i) + '.jpg'
img, disp_img = image_from_url(url, fn)
ancestry = df['ancestry'][i]
# Image classification
start_time = time.time() # Record inference time
predictions = model.predict(img, batch_size=1)
label_num, conf, det_imclass = get_predict_info(predictions, url, i, stop, start)
end_time = time.time()
print("Inference time: {} sec".format(format(end_time-start_time, '.2f')))
# Record results in placeholder lists to inspect results in next step
results = record_results(fn, conf, true_imclass, str(det_imclass), ancestry)
except:
pass
# Combine to df and export results
export_results(results)
# Combine model outputs for image rating classes 1-5
# Combine prediction files created in codeblock above
true_imclasses = ['1', '2', '3', '4', '5']
base = 'ratings_' + TRAIN_SESS_NUM + '_'
all_filenames = [base + true_imclass + '.csv' for true_imclass in true_imclasses]
all_predictions = pd.concat([pd.read_csv(f, sep=',', header=0, na_filter = False) for f in all_filenames])
print("Model predictions for Training Attempt {}, {}:".format(TRAIN_SESS_NUM, handle_base))
print("No. Images: {}\n{}".format(len(all_predictions), all_predictions[['filename', 'true_id', 'det_id']].head()))
# Aggregate numerical "true_id" classes into 'bad' and 'good'
# TO DO: Enter class names
c0 = "bad" #@param {type:"string"}
c1 = "good" #@param {type:"string"}
imclasses = [c0, c1]
# All predictions of 1 or 2 become 'bad'
all_predictions.true_id[(all_predictions.true_id==1) | (all_predictions.true_id==2)] = c0
all_predictions.det_id[(all_predictions.det_id==1) | (all_predictions.det_id==2)] = c0
# All predictions of 4 or 5 become 'good'
all_predictions.true_id[(all_predictions.true_id==4) | (all_predictions.true_id==5)] = c1
all_predictions.det_id[(all_predictions.det_id==4) | (all_predictions.det_id==5)] = c1
# Remove all predictions of 3
all_predictions = all_predictions[all_predictions.det_id!=3]
all_predictions = all_predictions[all_predictions.true_id!=3]
print("Numeric image ratings successfully aggregated into {} (1-2) and {} (4-5):\n{}".format(c0, c1, all_predictions[['filename', 'true_id', 'det_id']].head()))
# Define functions
# Valide predictions by image class (and optionally, by: taxon)
def validate_predictions(df, inspect_by_taxon):
# If inspecting for taxon-specific images only
taxon = None
if inspect_by_taxon:
# TO DO: Type in the taxon you'd like to inspect results for using form field to right
taxon = "" #@param {type:"string"}
df = df.loc[df.ancestry.str.contains(taxon, case=False, na=False)]
print("Inspecting results for {}:\n{}".format(taxon, df.head()))
# Validate predictions
# Check where true ratings and model-determined classes match
df['det'] = (df['true_id'] == df['det_id'])
tru = df.loc[df.det, :] # True ID
fal = df.loc[~df.det, :] # False ID
# Inspect by image class and confidence values
# Check how many true/false predictions are at each confidence value
# Class 0 - 'Bad'
c0t = tru.loc[tru['true_id'] == c0, :] # True dets
c0f = fal.loc[fal['true_id'] == c0, :] # False dets
# Class 1 - 'Good'
c1t = tru.loc[tru['true_id'] == c1, :] # True dets
c1f = fal.loc[fal['true_id'] == c1, :] # False dets
return tru, fal, c0t, c0f, c1t, c1f, taxon
# Plot results by image class
def plot_predict_x_conf(tru, fal, c0t, c0f, c1t, c1f, imclasses=imclasses, thresh=thresh):
# Plot parameters to make 1 subplot per image class
kwargs = dict(alpha=0.5, bins=15)
fig, axes = plt.subplots(len(imclasses), figsize=(10, 10), constrained_layout=True)
fig.suptitle('Prediction Confidence by Class\n Overall Accuracy: {}'.format(
format((len(tru)/(len(tru)+len(fal))),'.2f')))
# Make subplots
# Class 0 - 'Bad'
c0 = df.iloc[:, :N] # Pull N items for c0
# True predictions
axes[0].hist(c0t['confidence'], color='y', label='True Det', **kwargs)
# False predictions
axes[0].hist(c0f['confidence'], color='r', label='False Det', **kwargs)
axes[0].set_title("{} (n={} images)\n Accuracy: {}".format(imclasses[0],
len(c0t+c0f), format((len(c0t)/(len(c0t)+len(c0f))),'.2f')))
axes[0].legend();
# Class 1 - 'Good'
c1 = df.iloc[:, N:2*N] # Pull N items for c1
# True predictions
axes[1].hist(c1t['confidence'], color='y', label='True Det', **kwargs)
# False predictions
axes[1].hist(c1f['confidence'], color='r', label='False Det', **kwargs)
axes[1].set_title("{} (n={} images)\n Accuracy: {}".format(imclasses[1],
len(c1t+c1f), format((len(c1t)/(len(c1t)+len(c1f))),'.2f')))
axes[1].legend();
# Add Y-axis labels
for ax in fig.get_axes():
ax.set(ylabel='Freq (# imgs)')
if thresh:
ax.axvline(thresh, color='k', linestyle='dashed', linewidth=1)
return fig
# To save the figure
def save_figure(fig, TRAIN_SESS_NUM=TRAIN_SESS_NUM, taxon=taxon, handle_base=handle_base):
# Make filename
if taxon: # If for a specific taxon
if 'plant' in taxon:
handle_base = handle_base + '_plantae'
elif 'anim' in taxon:
handle_base = handle_base + '_animalia'
figname = TRAIN_SESS_NUM + '_' + handle_base + '.png'
fig.savefig(figname)
print("Histograms saved to ", figname)
return figname
# Load combined prediction results from above
df = all_predictions.copy()
# Optional: Inspect predictions for taxon-specific images only?
# TO DO: If "yes," check box
inspect_by_taxon = False #@param {type:"boolean"}
# Optional: Draw threshold value to help choose optimal balance b/w maximizing useful data and minimizing error
# TO DO: Set threshold value
thresh = 0 #@param {type:"number"}
# Validate predictions by image class (and optionally, by taxon)
tru, fal, c0t, c0f, c1t, c1f, taxon = validate_predictions(df, inspect_by_taxon)
# Plot results by image class
fig = plot_predict_x_conf(tru, fal, c0t, c0f, c1t, c1f)
# Export histograms
figname = save_figure(fig)
## Training metrics
*Metrics* for training fastai models are simply functions that take `input` and `target` tensors, and return some metric of interest for training. You can write your own metrics by defining a function of that type, and passing it to [`Learner`](/basic_train.html#Learner) in the [`metrics`](/metrics.html#metrics) parameter, or use one of the following pre-defined functions.
```
from fastai.gen_doc.nbdoc import *
from fastai.basics import *
```
## Predefined metrics:
```
show_doc(accuracy)
jekyll_warn("This metric is intended for classification of objects belonging to a single class.")
preds = tensor([0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1]) # bs = 5, n = 2
ys = tensor([1], [0], [1], [0], [1])
accuracy(preds, ys)
show_doc(accuracy_thresh)
```
Predictions are compared to `thresh` after `sigmoid` is optionally applied (when `sigmoid=True`). Then we count how many of them match the targets.
```
jekyll_note("This function is intended for one-hot-encoded targets (often in a multiclassification problem).")
preds = tensor([0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1])
ys = tensor([0, 1], [1, 0], [0, 1], [1, 0], [0, 1])
accuracy_thresh(preds, ys, thresh=0.65, sigmoid=False)
show_doc(top_k_accuracy)
show_doc(dice)
```
$$dice = \frac{2(TP)}{2(TP) + FP + FN}$$
where TP, FP and FN are the number of true positives, false positives and false negatives.
```
preds = tensor([0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1])
ys = tensor([1], [0], [1], [0], [1])
dice(preds, ys) # TP = 2, FP = 1, FN = 1
show_doc(error_rate)
show_doc(mean_squared_error)
show_doc(mean_absolute_error)
show_doc(mean_squared_logarithmic_error)
show_doc(exp_rmspe)
show_doc(root_mean_squared_error)
show_doc(fbeta)
```
`beta` determines which F-beta score is computed, and `eps` is there for numerical stability. If `sigmoid=True`, a sigmoid is applied to the predictions before they are compared to `thresh` and then to the targets. See the [F1 score wikipedia page](https://en.wikipedia.org/wiki/F1_score) for details on the F-beta score.
$${F_\beta} = (1+\beta^2)\frac{precision \cdot recall}{(\beta^2 \cdot precision) + recall}$$
```
preds = tensor([0.6, 0.8, 0.2, 0.4, 0.9]).view(1, 5) # TP =2, FP = 1, FN = 1
ys = tensor([1, 0, 0, 1, 1]).view(1, 5)
fbeta(preds, ys, thresh=0.5, sigmoid=False)
jekyll_note("This function is intended for one-hot-encoded targets (often in a multiclassification problem).")
show_doc(explained_variance)
```
$$ Explained \ Variance = 1 - \frac{Var( targ - pred )}{Var( targ )}$$
```
preds = tensor([0.10, .20, .30, .40, .50])
ys = tensor([0.12, .17, .25, .44, .56]) # predictions are close to the truth
explained_variance(preds, ys)
show_doc(r2_score)
```
$$ {R^2} = 1 - \frac{\sum( targ - pred )^2}{\sum( targ - \overline{targ})^2}$$
where $\overline{targ}$ is the mean of the targ tensor.
```
r2_score(preds, ys)
```
The following metrics are classes, don't forget to instantiate them when you pass them to a [`Learner`](/basic_train.html#Learner).
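For instance (a minimal sketch, reusing the `data` and `model` placeholders from the examples further down):

```python
# Precision and Recall are classes, so they must be instantiated,
# unlike plain metric functions such as accuracy.
learn = cnn_learner(data, model, metrics=[accuracy, Precision(), Recall()])
```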
```
show_doc(RMSE, title_level=3)
show_doc(ExpRMSPE, title_level=3)
show_doc(Precision, title_level=3)
show_doc(Recall, title_level=3)
show_doc(FBeta, title_level=3)
show_doc(R2Score, title_level=3)
show_doc(ExplainedVariance, title_level=3)
show_doc(MatthewsCorreff, title_level=3)
```
Ref.: https://github.com/scikit-learn/scikit-learn/blob/bac89c2/sklearn/metrics/classification.py
```
show_doc(KappaScore, title_level=3)
```
Ref.: https://github.com/scikit-learn/scikit-learn/blob/bac89c2/sklearn/metrics/classification.py
[`KappaScore`](/metrics.html#KappaScore) supports linear and quadratic weights on the off-diagonal cells in the [`ConfusionMatrix`](/metrics.html#ConfusionMatrix), in addition to the default unweighted calculation treating all misclassifications as equally weighted. Leaving [`KappaScore`](/metrics.html#KappaScore)'s `weights` attribute as `None` returns the unweighted Kappa score. Updating `weights` to "linear" means off-diagonal ConfusionMatrix elements are weighted in linear proportion to their distance from the diagonal; "quadratic" means weights are squared proportional to their distance from the diagonal.
To use linear or quadratic weights, first create an instance of the metric and then update its `weights` attribute, as follows:
```
kappa = KappaScore()
kappa.weights = "quadratic"
learn = cnn_learner(data, model, metrics=[error_rate, kappa])
```
```
show_doc(ConfusionMatrix, title_level=3)
show_doc(MultiLabelFbeta, title_level=3)
```
[`MultiLabelFbeta`](/metrics.html#MultiLabelFbeta) implements a multilabel classification F-beta score, similar to [scikit-learn's](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html), as a [`LearnerCallback`](/basic_train.html#LearnerCallback). Average options: ["micro", "macro", "weighted", "none"]. It is intended for use with one-hot-encoded targets of 1s and 0s.
## Creating your own metric
Creating a new metric can be as simple as creating a new function. If your metric is an average over the total number of elements in your dataset, just write the function that will compute it on a batch (taking `pred` and `targ` as arguments). It will then be automatically averaged over the batches (taking their different sizes into account).
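For example, here is a sketch of such a function (the name `my_l1` is made up for illustration):

```python
def my_l1(pred, targ):
    "Mean absolute difference between predictions and targets, averaged over the batch."
    return (pred - targ).abs().mean()

learn = Learner(data, model, metrics=[my_l1])
```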
Sometimes metrics aren't simple averages however. If we take the example of precision for instance, we have to divide the number of true positives by the number of predictions we made for that class. This isn't an average over the number of elements we have in the dataset; we only consider the elements where we made a positive prediction for a specific class. Computing the precision for each batch and then averaging will yield a result that may be close to the real value, but won't be exactly right (and it also depends on how you deal with the special case of 0 positive predictions).
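A quick numeric illustration of the gap, with made-up numbers:

```python
# Batch 1: 1 true positive out of 1 positive prediction; batch 2: 1 out of 3.
tp = [1, 1]
pred_pos = [1, 3]
per_batch = [t / p for t, p in zip(tp, pred_pos)]
print(sum(per_batch) / len(per_batch))  # 0.667 -- average of the per-batch precisions
print(sum(tp) / sum(pred_pos))          # 0.5   -- the actual precision over both batches
```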
This is why, in fastai, every metric is implemented as a callback. If you pass a regular function, the library transforms it into a proper callback called `AverageMetric`. The callback metrics are only called during the validation phase, and only for the following events:
- <code>on_epoch_begin</code> (for initialization)
- <code>on_batch_begin</code> (if we need to have a look at the input/target and maybe modify them)
- <code>on_batch_end</code> (to analyze the last results and update our computation)
- <code>on_epoch_end</code> (to wrap up the final result that should be added to `last_metrics`)
As an example, the following code is the exact implementation of the [`AverageMetric`](/callback.html#AverageMetric) callback that transforms a function like [`accuracy`](/metrics.html#accuracy) into a metric callback.
```
class AverageMetric(Callback):
"Wrap a `func` in a callback for metrics computation."
def __init__(self, func):
# If it's a partial, use func.func
name = getattr(func,'func',func).__name__
self.func, self.name = func, name
def on_epoch_begin(self, **kwargs):
"Set the inner value to 0."
self.val, self.count = 0.,0
def on_batch_end(self, last_output, last_target, **kwargs):
"Update metric computation with `last_output` and `last_target`."
if not is_listy(last_target): last_target=[last_target]
self.count += last_target[0].size(0)
val = self.func(last_output, *last_target)
self.val += last_target[0].size(0) * val.detach().cpu()
def on_epoch_end(self, last_metrics, **kwargs):
"Set the final result in `last_metrics`."
return add_metrics(last_metrics, self.val/self.count)
```
Here [`add_metrics`](/torch_core.html#add_metrics) is a convenience function that will return the proper dictionary for us:
```python
{'last_metrics': last_metrics + [self.val/self.count]}
```
And here is another example that properly computes the precision for a given class.
```
class Precision(Callback):
def on_epoch_begin(self, **kwargs):
self.correct, self.total = 0, 0
def on_batch_end(self, last_output, last_target, **kwargs):
preds = last_output.argmax(1)
self.correct += ((preds==0) * (last_target==0)).float().sum()
self.total += (preds==0).float().sum()
def on_epoch_end(self, last_metrics, **kwargs):
return add_metrics(last_metrics, self.correct/self.total)
```
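By the same pattern, a recall counterpart (a sketch, not part of the library) would divide the true positives by the number of actual positives for that class instead:

```python
class Recall(Callback):
    def on_epoch_begin(self, **kwargs):
        self.correct, self.total = 0, 0
    def on_batch_end(self, last_output, last_target, **kwargs):
        preds = last_output.argmax(1)
        # numerator: correct predictions of class 0; denominator: targets that really are class 0
        self.correct += ((preds==0) * (last_target==0)).float().sum()
        self.total += (last_target==0).float().sum()
    def on_epoch_end(self, last_metrics, **kwargs):
        return add_metrics(last_metrics, self.correct/self.total)
```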
The following custom callback class example measures peak RAM usage during each epoch:
```
import tracemalloc
class TraceMallocMetric(Callback):
def __init__(self):
super().__init__()
self.name = "peak RAM"
def on_epoch_begin(self, **kwargs):
tracemalloc.start()
def on_epoch_end(self, last_metrics, **kwargs):
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return add_metrics(last_metrics, torch.tensor(peak))
```
To deploy it, you need to pass an instance of this custom metric in the [`metrics`](/metrics.html#metrics) argument:
```python
learn = cnn_learner(data, model, metrics=[accuracy, TraceMallocMetric()])
learn.fit_one_cycle(3, max_lr=1e-2)
```
And then the output changes to:
```
Total time: 00:54
epoch train_loss valid_loss accuracy peak RAM
1 0.333352 0.084342 0.973800 2395541.000000
2 0.096196 0.038386 0.988300 2342145.000000
3 0.048722 0.029234 0.990200 2342680.000000
```
As mentioned earlier, using the [`metrics`](/metrics.html#metrics) argument with a custom metric class is limited: it can only access a few phases of the callback system, it can only return one numerical value, and as you can see its output is hardcoded to 6 decimal places, even if the number is an int.
To overcome these limitations callback classes should be used instead.
For example, the following class:
* uses phases not available to the metric classes
* reports 3 columns instead of just one
* reports its columns as ints instead of floats
```
import tracemalloc
class TraceMallocMultiColMetric(LearnerCallback):
_order=-20 # Needs to run before the recorder
def __init__(self, learn):
super().__init__(learn)
self.train_max = 0
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['used', 'max_used', 'peak'])
def on_batch_end(self, train, **kwargs):
# track max memory usage during the train phase
if train:
current, peak = tracemalloc.get_traced_memory()
self.train_max = max(self.train_max, current)
def on_epoch_begin(self, **kwargs):
tracemalloc.start()
def on_epoch_end(self, last_metrics, **kwargs):
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return add_metrics(last_metrics, [current, self.train_max, peak])
```
Note that it subclasses [`LearnerCallback`](/basic_train.html#LearnerCallback) and not [`Callback`](/callback.html#Callback), since the former provides extra features not available in the latter.
Also, `_order=-20` is crucial: without it the custom columns will not be added. It tells the callback system to run this callback before the recorder system.
To deploy it, you need to pass the class itself (not an instance!) in the `callback_fns` argument. This is because the `learn` object doesn't exist yet, and it is required to instantiate `TraceMallocMultiColMetric`. The system will do that for us automatically as soon as the `learn` object has been created.
```python
learn = cnn_learner(data, model, metrics=[accuracy], callback_fns=TraceMallocMultiColMetric)
learn.fit_one_cycle(3, max_lr=1e-2)
```
And then the output changes to:
```
Total time: 00:53
epoch train_loss valid_loss accuracy used max_used peak
1 0.321233 0.068252 0.978600 156504 2408404 2419891
2 0.093551 0.032776 0.988500 79343 2408404 2348085
3 0.047178 0.025307 0.992100 79568 2408404 2342754
```
Another way to do the same thing is by using `learn.callbacks.append`. This time we need to instantiate `TraceMallocMultiColMetric` with the `learn` object, which we now have, since this is called after `learn` was created:
```python
learn = cnn_learner(data, model, metrics=[accuracy])
learn.callbacks.append(TraceMallocMultiColMetric(learn))
learn.fit_one_cycle(3, max_lr=1e-2)
```
Configuring the custom metrics in the `learn` object sets them to run in all future [`fit`](/basic_train.html#fit)-family calls. However, if you'd like to configure it for just one call, you can configure it directly inside [`fit`](/basic_train.html#fit) or [`fit_one_cycle`](/train.html#fit_one_cycle):
```python
learn = cnn_learner(data, model, metrics=[accuracy])
learn.fit_one_cycle(3, max_lr=1e-2, callbacks=TraceMallocMultiColMetric(learn))
```
And to stress the differences:
* the `callback_fns` argument expects a classname or a list of those
* the [`callbacks`](/callbacks.html#callbacks) argument expects an instance of a class or a list of those
* `learn.callbacks.append` expects a single instance of a class
For more examples, look inside the fastai codebase and its test suite: search for classes that subclass either [`Callback`](/callback.html#Callback) or [`LearnerCallback`](/basic_train.html#LearnerCallback), and for subclasses of those two.
Finally, while the above examples all add to the metrics, it's not a requirement. A callback can do anything it wants and it is not required to add its outcomes to the metrics printout.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(FBeta.on_batch_end)
show_doc(FBeta.on_epoch_begin)
show_doc(FBeta.on_epoch_end)
show_doc(mae)
show_doc(msle)
show_doc(mse)
show_doc(rmse)
show_doc(Precision.on_epoch_end)
show_doc(FBeta.on_train_end)
show_doc(KappaScore.on_epoch_end)
show_doc(MatthewsCorreff.on_epoch_end)
show_doc(FBeta.on_train_begin)
show_doc(RMSE.on_epoch_end)
show_doc(ConfusionMatrix.on_train_begin)
show_doc(ConfusionMatrix.on_batch_end)
show_doc(ConfusionMatrix.on_epoch_end)
show_doc(Recall.on_epoch_end)
show_doc(ExplainedVariance.on_epoch_end)
show_doc(ExpRMSPE.on_epoch_end)
show_doc(ConfusionMatrix.on_epoch_begin)
show_doc(R2Score.on_epoch_end)
```
## New Methods - Please document or move to the undocumented section
[View in Colaboratory](https://colab.research.google.com/github/soryeongk/TWL/blob/master/soryeongk/180716_python.ipynb)
It is good to use a divide-and-conquer strategy
- Break a hard problem into sufficiently small problems and solve those
- e.g. (the problem of generating a random number) + (the problem of shuffling a list using random numbers)
The approach by 송서영, 김소령, 안세영 and 이지윤 using random.shuffle().
* The most convenient and fastest method.
```
import random
def shuffle(data):
copied = data[:]
random.shuffle(copied)
return copied
data = [1, 2, 3, 4, 5]
copied = shuffle(data)
copied
```
The approach by 이다혜 and 김예은 using `random.sample()`.
* Performs random sampling without replacement for as many items as the data contains
* One of the methods that guarantees a good result
* random.sample(data, n)
  - randomly samples n of the elements in data
  - because it samples without replacement, there are no duplicates
```
import random
data = [1, 2, 3, 4, 5, 6]
random.sample(data, len(data))
```
김희영's approach of sorting by length.
* The problem is that "length" is too regular a criterion, but if you swap in a sufficiently complex key this can be a good approach in some cases
* Using a hash value instead of "length" gives a random sampling method called hash-based sampling, one of the topics we will cover later (see the sketch after the code below).
* By changing the sort key, sorting itself can become shuffling
```
def sort_by_len(data):  # return the list sorted by the length of each element
    return sorted(data, key=len)
d = ["chicken", "bentto", "pizza", "chinese", "icecream", "milk", "pretzel"]
print(sort_by_len(d))  # pro: fine for one-off use / con: the ordering rule is so obvious that the result can hardly be called random
```
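A rough sketch of the hash-based idea mentioned above; the random salt makes the order different on every run (this is only an illustration, not the exact method covered later):

```
import random

def hash_shuffle(data):
    # sort by the hash of each element combined with a random salt:
    # the key looks arbitrary, so the resulting order looks shuffled
    salt = str(random.random())
    return sorted(data, key=lambda x: hash(str(x) + salt))

print(hash_shuffle(["chicken", "bentto", "pizza", "chinese", "icecream", "milk", "pretzel"]))
```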
이지윤's randomness based on the current time.
* There are many attempts to pull noise from the real world (current time, mouse position, battery level, keyboard typing patterns, and so on) and use it as a source of randomness.
* The catch is that modern computers are too fast for the "real-time clock" alone to be enough.
```
# randomness based on the current time
import time
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def shuffle1(data):
now = int(time.time())
random = now % 10
result = data.copy()
length = len(data)-1
for num in range(length) :
temp = result[num]
result[num] = result[length - random]
result[length-random] = temp
return result
print('shuffle ', shuffle1(data))
print('original ', data)
```
안세영's approach: repeatedly pop an element from a random position and insert it at another random position.
* You still need to decide how many repetitions are enough
```
import random
a = [1, 2, 3, 4, 5]
def shuffle2(data):
n = len(data)
shuffled_data = data[:]
for i in range(20):
random_index = random.randint(0, n-1)
random_index2 = random.randint(0, n-1)
value = shuffled_data.pop(random_index)
shuffled_data.insert(random_index2, value)
return shuffled_data
print(a)
print(shuffle2(a))
```
이은지's Case 3:
* Produces results of the same quality as Python's built-in function, though there is room to improve performance
* 이다혜, 정지은, 정지혜 and 김소령 also tried similar approaches
* Move `data4` inside the function
* Early on it is good practice to write thorough comments like the ones below; however, considering `git diff` and the like, it is better not to align the comments into a single column.
```
import random  # import the random module
data = ["Heroes", "Bears", "Eagles", "Twins", "Wyverns", "Lions", "Giants", "Dinos", "Tigers", "Wiz"]
def shuffle2(data):
    # create a list that will hold the result
    data4 = []
    # Loop until done. If this part were a for loop, the function could end
    # before the result list has been completely filled.
    while True:
        if len(data) == len(data4):   # once the original list (data) and the result list (data4) are the same length...
            break                     # stop
        else:                         # otherwise...
            choice = random.choice(data)  # randomly pick one element from the original list (data)
            if choice in data4:       # if the picked element (choice) is already in the result list (data4)...
                pass                  # do nothing
            else:                     # if it is not in the result list yet...
                data4.append(choice)  # append the picked element to the result list
    return data4
data_4th = shuffle2(data)
print(data_4th)
print(data)
```
박소현's No. 03 and 장예빈's No. 3:
* Equivalent to the Fisher-Yates sampling algorithm
* There is room for improvement (in-place swapping; see the sketch after the code below)
* Introducing the Fisher-Yates shuffling algorithm
  - A procedure introduced by Ronald Fisher and Frank Yates in 1938 in <Statistical tables for biological, agricultural and medical research>.
  - It requires a "table of random numbers", paper and a pencil.
  - Visualizing the algorithm, and visualizing its results (another use case for data visualization)
```
from random import randint
def shuffle3(data):
new_list = data.copy()
result = []
x = 0
for x in range(len(data)):
index = (randint(0, len(new_list) - 1))
result.append(new_list[index])
del new_list[index]
print(result)
shuffle3([1, 2, 3, 4, 5])
```
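A sketch of the in-place swapping improvement mentioned above: instead of appending to a second list and deleting from the copy, swap each position with a randomly chosen position at or before it.

```
from random import randint

def fisher_yates(data):
    result = data.copy()
    # walk from the end; swap each element with a random one at or before it
    for i in range(len(result) - 1, 0, -1):
        j = randint(0, i)
        result[i], result[j] = result[j], result[i]
    return result

print(fisher_yates([1, 2, 3, 4, 5]))
```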
Other notes:
* 김예은: deciding on an evaluation criterion in advance (how fast does it run?) and writing a function to automate the measurement were excellent.
* Fixing the evaluation criterion up front and writing a function that automates the measurement is a very good habit
# Crawling - Morning
* The internet, the web, web browsers, HTML
* The Internet
  : a network of computer networks (an inter-network)
* The web, or World Wide Web (WWW)
  - A network of computers that communicate over HTTP (HyperText Transfer Protocol).
  - What is a protocol? Example: the two-way radio voice procedure
    : Over, Roger, Affirmative, Negative, Out
  - HTTP is implemented "on top of" the internet protocols. The web is a part of the internet
* What parts of the internet are not the web? : games, KakaoTalk, email, etc.
* The most important concepts on the web (see the urllib.parse sketch after this list):
  - URL (Uniform Resource Locator):
    format
    - protocol://user@host:port/path?query#fragment
    - https://www.google.com/search?q=test
    - https indicates the connection is encrypted, i.e. a somewhat safer, more trustworthy site
  - Hyperlink, Hypertext, HTML (HyperText Markup Language)
  - Viewing the structure of a web document (HTML) in 3D
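A small sketch of how a URL splits into the parts above, using the standard library's urllib.parse (the #results fragment is added just for illustration):

```
from urllib.parse import urlparse

parts = urlparse("https://www.google.com/search?q=test#results")
print(parts.scheme)    # 'https'            (protocol)
print(parts.netloc)    # 'www.google.com'   (user@host:port)
print(parts.path)      # '/search'
print(parts.query)     # 'q=test'
print(parts.fragment)  # 'results'
```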
```
from urllib import request
"""You could also write `import urllib.request`,
but then the `request` below would have to be written as `urllib.request`"""
url = "https://www.naver.com"
with request.urlopen(url) as f:
    html = f.read().decode('utf-8')
print(html)
```
## Web crawling practice
* An important part of programming is managing resources
* Memory is one such resource; unused memory is sometimes reclaimed automatically,
  but often it is not
* For example, after opening and reading a resource you should close it again
* The content of the resource is held in the object `f` and released once you are done with it (see the sketch after this list)
  - with SOMETHING as NAME:
    = assign the resource created by SOMETHING to NAME, and release it once you are done
  - Linux tries to treat as many things as possible as files,
    which is why the URL resource is also held in f (a file-like object);
    printers, keyboards, etc. are also files: you open the file, do input/output, and close it again
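What `with ... as ...` does can be written out by hand roughly like this (a sketch of the equivalent resource handling):

```
from urllib import request

f = request.urlopen("https://www.naver.com")   # acquire the resource
try:
    html = f.read().decode('utf-8')            # use it
finally:
    f.close()                                  # always release it, even if an error occurs
```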
```
!pip install html5lib  # not Python but a shell command: installs the package on the Google (Colab) machine
```
# Crawling - Afternoon
## Copyright and licenses
But is everything on the web free for anyone to use, for any purpose?
* Copyright and licenses
  - Most material belongs to someone
* Software copyright vs. data/content copyright
  - The license is sometimes stated explicitly somewhere
* CCL (Creative Commons License), KOGL (공공누리)
* The robots exclusion protocol, robots.txt
  - https://www.daum.net/robots.txt (Daum)
  - https://www.naver.com/robots.txt (Naver)
  - https://www1.president.go.kr/robots.txt (Blue House national petitions)
* Rate limits and firewalls
## Markup
* Additional information written onto an existing document
* e.g. information about the document's structure, spelling corrections, and so on
### Markup languages
* Artificial languages for recording markup information in a way a computer can process consistently
  - e.g. Markdown, a lightweight markup language
  - HTML: an artificial language for writing markup on hypertext
    (hypertext: text that contains hyperlinks)
* Let's try writing our own markup of a news article:
\^^/제목: 돌아온' 오늘 MBC 뉴스의 첫 앵커 멘트는 '사과'였다
\^^/날짜: 2017년 12월 08일 15시 27분, ^^\*작성자: 허완 기자, ^^\*매체: 허핑턴포스트코리아
\^^/서론:
\^^/인용: _“저희 MBC는 신임 최승호 사장의 취임에 맞춰, 오늘(8일)부터 뉴스데스크 앵커를 교체하고 당분간 뉴스를 임시체제로 진행합니다. 저희들은 재정비 기간 동안 MBC 보도가 시청자 여러분께 남긴 상처들을 거듭 되새기며, 철저히 반성하는 시간을 갖겠습니다. 치밀한 준비를 거쳐 빠른 시일 안에 정확하고 겸손하고 따뜻한 뉴스데스크로 시청자 여러분께 다시 인사드리겠습니다.”_
\^^/본론: 8일 저녁 8시, MBC 메인뉴스인 '뉴스데스크' 대신 'MBC뉴스'라는 타이틀로 방송된 뉴스에서 임시 앵커를 맡은 김수지 아나운서는 짤막한 사과문을 읽어내려갔다.
## Parsing HTML to build a DOM tree
* HTML is one long string. Parsing that long string into a tree structure gives the HTML DOM (Document Object Model) tree.
* Try looking at the DOM tree of a site you visit often (using the browser's developer tools).
### Building the DOM tree with an external library: html5lib
* Want to study HTML further?
  - 생활코딩 WEB1 - an easy, friendly self-study site whose content is fairly accurate
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>'돌아온' 오늘 MBC 뉴스의 첫 앵커 멘트는 '사과'였다</title>
<style>
h1 {
color: red;
}
</style>
</head>
<body>
<h1>'돌아온' 오늘 MBC 뉴스의 첫 앵커 멘트는 '사과'였다.</h1>
<p>2017년 12월 08일 15시 27분, 허완 기자, 허핑턴포스트코리아</p>
<blockquote>
<p>“저희 MBC는 신임 최승호 사장의 취임에 맞춰, 오늘(8일)부터 뉴스데스크 앵커를 교체하고 당분간 뉴스를 임시체제로 진행합니다. 저희들은 재정비 기간 동안 MBC 보도가 시청자 여러분께 남긴 상처들을 거듭 되새기며, 철저히 반성하는 시간을 갖겠습니다.</p>
<p>치밀한 준비를 거쳐 빠른 시일 안에 정확하고 겸손하고 따뜻한 뉴스데스크로 시청자 여러분께 다시 인사드리겠습니다.”</p>
</blockquote>
<p>8일 저녁 8시, MBC 메인뉴스인 '뉴스데스크' 대신 'MBC뉴스'라는 타이틀로 방송된 뉴스에서 임시 앵커를 맡은 김수지 아나운서는 짤막한 사과문을 읽어내려갔다.</p>
</body>
</html>
```
from urllib import request
"""You could also write `import urllib.request`,
but then the `request` below would have to be written as `urllib.request`"""
import html5lib
"""import the package inside this code cell"""
url = "https://www.naver.com"
with request.urlopen(url) as f:
    html = f.read().decode('utf-8')
html5lib.parse(html)
dom = html5lib.parse(html)
children = dom.getchildren()
"""running getchildren() shows that its result is a list,
so children is a variable of type list"""
head = children[0]  # this is the <head>; we will look for the title tag inside it
"""we need both the tag names and the text inside the tags"""
for tag_name in head.getchildren():
    if 'title' in str(tag_name):
        print(tag_name.text)
        break
from urllib import request
"""You could also write `import urllib.request`,
but then the `request` below would have to be written as `urllib.request`"""
import html5lib
"""import the package inside this code cell"""
url = "https://www.naver.com"
with request.urlopen(url) as f:
    html = f.read().decode('utf-8')
html5lib.parse(html)
dom = html5lib.parse(html)
children = dom.getchildren()
"""running getchildren() shows that its result is a list,
so children is a variable of type list"""
head = children[0]  # this is the <head>; we will look for the title tag inside it
"""we need both the tag names and the text inside the tags"""
for element in head.getchildren():
    if element.tag[-5:] == 'title':  # to be a bit more precise
        print(element.text)
        break
from urllib import request
"""You could also write `import urllib.request`,
but then the `request` below would have to be written as `urllib.request`"""
import html5lib
"""import the package inside this code cell"""
url = "https://www.naver.com"
with request.urlopen(url) as f:
    html = f.read().decode('utf-8')
html5lib.parse(html)
dom = html5lib.parse(html)
children = dom.getchildren()
"""running getchildren() shows that its result is a list,
so children is a variable of type list"""
head = children[0]  # this is the <head>; we will look for the title tag inside it
"""we need both the tag names and the text inside the tags"""
head.getchildren()[0]
```
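The `tag[-5:] == 'title'` trick is needed because html5lib returns namespaced tag names such as `'{http://www.w3.org/1999/xhtml}title'`. Assuming that default tree builder, the title can also be looked up directly by its full name (a sketch):

```
XHTML = "{http://www.w3.org/1999/xhtml}"

title = dom.find(XHTML + "head/" + XHTML + "title")  # search by the full namespaced path
if title is not None:
    print(title.text)
```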
```
from collections import defaultdict
import random
import numpy as np
```
## About the dataset
- Each line contains: ID, Word, Base, POS, POS2, ?, Head, Type
- Columns are tab-separated and sentences are separated by a blank line (see the hypothetical example below)
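A hypothetical two-token sentence in this format (made up for illustration; a blank line would follow it before the next sentence):

```
1	Dogs	dog	NNS	NNS	_	2	SBJ
2	bark	bark	VBP	VBP	_	0	ROOT
```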
```
train_file_path = "../data/mstparser-en-train.dep"
test_file_path = "../data/mstparser-en-test.dep"
def load_dataset(path, encoding="utf-8"):
with open(path, encoding=encoding) as f:
dataset = f.readlines()
return dataset
```
- How do we create features for a graph-based dependency parser?
- I use the features that appear frequently in the training dataset.
```
class Dataset:
def __init__(self, dataset):
"""
        The dataset format should be CoNLL:
        tab-separated columns, with sentences separated by a blank line.
        Each line contains: ID, Word, Base, POS, POS2, ?, Head, Type.
"""
self.data = self._format(dataset)
def __len__(self):
return len(self.data)
def __getitem__(self, key):
if isinstance(key, slice):
return [self.data[i] for i in range(*key.indices(len(self.data)))]
elif isinstance(key, int):
if key < 0:
key += len(self.data)
if key < 0 or key >= len(self.data):
raise IndexError
return self.data[key]
else:
raise TypeError
def _format(self, dataset):
batch = []
for i in range(len(dataset)):
if dataset[i] == "\n":
first = i - int(dataset[i-1].split("\t")[0])
batch.append([line.split("\n")[0].split("\t") for line in dataset[first:i]])
return batch
class FeatureExtractor:
def __init__(self, dataset, lr=0.01, topn=30, init_func=None):
self.dataset = dataset
self.lr = lr
self.topn = topn
self.init_func = init_func
self.features = None
        self.weights = None
def get_weights(self):
self.features = self._get_features()
self.weights = self._update_weights()
return self.weights
def _count_features(self):
cnt = defaultdict(lambda: defaultdict(int))
for batch in self.dataset:
for i, data_i in enumerate(batch):
cnt["word"][data_i[1]] += 1
cnt["pos"][data_i[3]] += 1
cnt["dep"][data_i[-1]] += 1
if data_i[-1] == "ROOT":
cnt["pos_pos"][f"{data_i[3]}_ROOT"] += 1
cnt["word_pos"][f"{data_i[1]}_ROOT"] += 1
cnt["dep"][f"{data_i[-1]}_ROOT"] += 1
else:
head_idx = i - int(data_i[0]) + int(data_i[6])
head = batch[head_idx]
cnt["word_word"][f"{data_i[1]}_{head[1]}"] += 1
cnt["pos_pos"][f"{data_i[3]}_{head[3]}"] += 1
cnt["word_pos"][f"{data_i[1]}_{head[3]}"] += 1
cnt["pos_word"][f"{data_i[3]}_{head[1]}"] += 1
cnt["dep_word"][f"{data_i[-1]}_{head[1]}"] += 1
cnt["dep_pos"][f"{data_i[-1]}_{head[3]}"] += 1
return cnt
def _get_features(self):
"""
        This function extracts features for the graph-based parser.
        Features include the word form, the POS tag, and combinations of POS and word form.
"""
cnt = self._count_features()
features = []
for feat_type in cnt.keys():
if feat_type == "pos_pos":
for feat, v in cnt[feat_type].items():
if v >= 5:
features.append(feat)
else:
for feat, v in sorted(cnt[feat_type].items(), key=lambda i: i[1], reverse=True)[:self.topn]:
features.append(feat)
return features
def _init_weights(self):
weights = defaultdict(int)
for feat in self.features:
if self.init_func:
weights[feat] = self.init_func()
else:
weights[feat] = random.random()
return weights
def _update_weights(self):
w = self._init_weights()
for batch in self.dataset:
score = defaultdict(int)
for i, data_i in enumerate(batch):
for j, data_j in enumerate(batch):
if i == j:
continue
score[j] += w[data_i[1]]
score[j] += w[data_i[3]]
score[j] += w[data_i[-1]]
score[j] += w[f"{data_i[1]}_{data_j[1]}"]
score[j] += w[f"{data_i[1]}_{data_j[3]}"]
score[j] += w[f"{data_i[3]}_{data_j[1]}"]
score[j] += w[f"{data_i[3]}_{data_j[3]}"]
score[j] += w[f"{data_i[-1]}_{data_j[1]}"]
score[j] += w[f"{data_i[-1]}_{data_j[3]}"]
score["ROOT"] += w[f"{data_i[1]}_ROOT"]
score["ROOT"] += w[f"{data_i[3]}_ROOT"]
score["ROOT"] += w[f"{data_i[-1]}_ROOT"]
if len(score) == 0:
continue
head_pred = sorted(score.items(), key=lambda i: i[1], reverse=True)[0][0]
head_true = data_i[6]
if head_pred == "ROOT":
if data_i[-1] != "ROOT":
w[data_i[1]] -= self.lr
w[data_i[3]] -= self.lr
w[f"{data_i[1]}_ROOT"] -= self.lr
w[f"{data_i[3]}_ROOT"] -= self.lr
w[f"{data_i[-1]}_ROOT"] -= self.lr
elif head_pred != head_true:
data_j = batch[head_pred]
w[data_i[1]] -= self.lr
w[data_i[3]] -= self.lr
w[data_i[-1]] -= self.lr
w[f"{data_i[1]}_{data_j[1]}"] -= self.lr
w[f"{data_i[1]}_{data_j[3]}"] -= self.lr
w[f"{data_i[3]}_{data_j[1]}"] -= self.lr
w[f"{data_i[3]}_{data_j[3]}"] -= self.lr
w[f"{data_i[-1]}_{data_j[1]}"] -= self.lr
w[f"{data_i[-1]}_{data_j[3]}"] -= self.lr
return w
train_dataset = Dataset(load_dataset(train_file_path))
test_dataset = Dataset(load_dataset(test_file_path))
class MSTParser:
def __init__(self, extractor):
self.w = extractor.get_weights()
def __call__(self, batch):
return self.parse(batch)
def parse(self, batch):
scores = self._score(batch)
best_edges = self._get_best_edges(scores)
scores = self._subtract(scores, best_edges)
best_edges = self._get_best_edges(scores)
status, cycles = self._involveCycle([edge for edge, score in best_edges])
if status:
best_edges = self._remove_cycles(scores, cycles, best_edges)
return [edge for edge, score in best_edges[1:]]
def evaluate(self, dataset):
UAS_SUM = 0
for batch in dataset:
# Parse a sentence
scores = self._score(batch)
best_edges = self._get_best_edges(scores)
scores = self._subtract(scores, best_edges)
best_edges = self._get_best_edges(scores)
status, cycles = self._involveCycle([edge for edge, score in best_edges])
if status:
best_edges = self._remove_cycles(scores, cycles, best_edges)
y_test = self._extract_test(batch)
y_pred = [edge for edge, score in best_edges[1:]]
UAS_SUM += self._get_UAS(y_test, y_pred)
return UAS_SUM / len(dataset)
def _score(self, batch):
N = len(batch)
scores = np.ones([N+1, N+1]) * -1e3
for i in range(1, N+1):
for j in range(N+1):
if i == j:
continue
score = 0
score += self.w[batch[i-1][1]] #word
score += self.w[batch[i-1][3]] # pos
score += self.w[batch[i-1][-1]]
if j == 0:
score += self.w[f"{batch[i-1][1]}_ROOT"] + 1
score += self.w[f"{batch[i-1][3]}_ROOT"] + 1
score += self.w[f"{batch[i-1][-1]}_ROOT"] + 1
else:
score += self.w[f"{batch[i-1][1]}_{batch[j-1][1]}"]
score += self.w[f"{batch[i-1][1]}_{batch[j-1][3]}"]
score += self.w[f"{batch[i-1][3]}_{batch[j-1][1]}"]
score += self.w[f"{batch[i-1][3]}_{batch[j-1][3]}"]
score += self.w[f"{batch[i-1][-1]}_{batch[j-1][1]}"]
score += self.w[f"{batch[i-1][-1]}_{batch[j-1][3]}"]
scores[i, j] = score
return scores
def _get_best_edges(self, scores):
# ignore 0-th row because it would contain scores between ROOT as dependent and words as head
return [(np.argmax(scores[i, :]),
np.max(scores[i, :]))
if i != 0 else (-1, -1e3)
for i in range(scores.shape[0])
]
# best_edges = []
# root_child_idx = np.argmax(scores[:, 0])
# for i in range(scores.shape[0]):
# if i == root_child_idx:
# best_edges.append((0, scores[root_child_idx, 0]))
# elif i == 0:
# best_edges.append((-1, -1e3))
# else:
# head_idx = np.argmax(scores[i, 1:])
# best_edges.append((head_idx, scores[i, head_idx]))
# return best_edges
def _subtract(self, scores, best_edges):
N = scores.shape[0]
for i in range(N):
for j in range(N):
if i == 0 or i == j:
continue
scores[i, j] -= best_edges[i][1]
return scores
def _involveCycle(self, edges):
memory = []
for dep, head in enumerate(edges):
dep_ = edges[head]
if dep == dep_ and (sorted([dep, head]) not in memory):
memory.append(sorted([dep, head]))
if memory:
return (True, memory)
else:
return (False, [])
def _remove_cycles(self, scores, cycles, best_edges):
N = scores.shape[0]
for cycle in cycles:
scores_ = scores.copy()
scores_[cycle[0], cycle[1]] = -1e3
scores_[cycle[1], cycle[0]] = -1e3
node, j = divmod(np.argmax(scores_[cycle, :]), N)
if node == 0:
c_head = cycle[0]
else:
c_head = cycle[1]
best_edges[c_head] = (j, scores[c_head, j])
return best_edges
def _extract_test(self, batch):
return [int(data[6]) for data in batch]
def _get_UAS(self, y_test, y_pred):
assert len(y_test) == len(y_pred)
match_num = 0
for test, pred in zip(y_test, y_pred):
if test == pred:
match_num += 1
return match_num / len(y_test)
extractor = FeatureExtractor(train_dataset, topn=100, lr=0.1, init_func=None)
parser = MSTParser(extractor)
parser.evaluate(test_dataset)
random.choice(train_dataset)
```
# Time Series Analysis & Forecasting of New Home Sales
## Table of Contents
1. Introduction
2. Objectives
3. Data & Methods
4. Results
4.1 Exploratory Data Analysis (EDA)
4.2 Forecasting
4.2.1 Input data & decomposition
4.2.2 Forecasting with HW Exponential Smoothing
4.2.3 Forecasting with ETS
4.2.4 Forecasting with ARIMA
5. Discussion & Conclusion
5.1 Model evaluation
5.2 General conclusions
5.3 Discussion
## 1. Introduction
New house construction & sales play a significant role in the housing economy. Besides generating employment, it simultaneously impacts the timber, furniture and appliance markets. It is an important indicator of a country's overall economic health too. Over the last 50 years, as we will see below, there have been a few significant bumps and turning points in the housing market, which shaped the trajectory of the US economy.
## 2. Objectives
The objectives of this analysis are two-fold: (1) review and discuss historical patterns in new home sales; and (2) test different forecasting models and run short-term forecasting of new home sales.
## 3. Data & methods
- The time series data I am working with comes from [census.gov](https://bit.ly/2Sp1ALz). This is a great source for time series data sets on a large number of social, economic and business indicators.
- I downloaded the "New Single-Family Houses Sold" series. This series is not seasonally adjusted. There is a related series, "Annual Rate of Single-Family Houses Sold", which has already gone through some treatment for seasonal adjustment, so I preferred the "raw" series, which is reported monthly and not adjusted.
- Key terms in this dataset are "new", "single-family house" and "sold". [Check out](https://www.census.gov/construction/nrs/definitions/index.html) the definition of these and other related terms.
- I am doing the analysis in R. Python has great `ts` [resources](https://www.statsmodels.org), but for forecasting R has the best package, `forecast`, developed by Rob J Hyndman.
```
# Required packages
library(fpp2)
library(forecast)
library(readxl)
library(ggplot2)
library(seasonal)
library(dplyr)
options(warn=0)
# data import
df = read.csv("C:/Users/DataS/Google Drive/Python/Datasets/usnewhousesold.csv", skip=6)
head(df)[1:3,]
# keep only `Value` column
df = df[, c(2)]
# convert the values into a time series object
series = ts(df, start = 1963, frequency =12)
```
## 4. Results
### 4.1 EDA
```
options(repr.plot.width = 6, repr.plot.height = 3)
# plot the series
autoplot(series)+
xlab("Time") + ylab("New home sales '000") + ggtitle(" Figure 1: New home sales series")+
theme(plot.title = element_text(size=8))
# Seasonal sub-series plot (the horizontal bar indicates monthly mean values)
options(repr.plot.width = 10, repr.plot.height = 3)
series_season = window(series, start=c(1963,1), end=c(2017,12))
ggsubseriesplot(series_season) + ylab(" ") +
ggtitle("Figure 2: Seasonal subseries plot")+
theme(plot.title = element_text(size=10))
options(repr.plot.width = 6, repr.plot.height = 3)
# remove seasonality (monthly variation) to see yearly changes
series_ma = ma(series, 12)
autoplot(series_ma) +
xlab("Time") + ylab("New home sales '000")+
ggtitle("Figure 3: The series after removing seasonality" )+
theme(plot.title = element_text(size=8))
options(repr.plot.width = 6, repr.plot.height = 3)
# zooming in to the down time, which is clearly between 2005 to 2012
series_downtime = window(series, start=c(2005,3), end=c(2012,2))
autoplot(series_downtime) +
xlab("Time") + ylab("New home sales '000")+
ggtitle(" Figure 4: New home sales down time")+
theme(plot.title = element_text(size=8))
```
**EDA Summary**
- Figure 1 shows clear seasonality all through, with a bit of a cyclic pattern.
- In terms of seasonality, not surprisingly, home sales start to go up in the spring, peak during the summer, then go down in the fall. This is predictable in most years, except for the years between 2005 and 2012 (Figures 1 & 3).
- After removing seasonality a few things stand out (Figure 3): there wasn't much movement (i.e. trend) in new home sales until 1990, other than the seasonality and a vague 4-5 year cycle. Post-1990 the market saw a boom, a steady growth that continued until 2005, when the market started to crash. The downward spiral continued until 2012 (Figure 4). There has been a recovery since then, with another steady growth and predictable seasonality (also see Figure 5), but without the cyclic pattern, otherwise mimicking 1990-2005.
- Sales were down more than 80%, from an average of 127k/month to 23k/month, during the "crash" years. Currently at 52k/month, sales are still about 60% below pre-2005 levels.
## 4.2 Forecasting
- In time series forecasting, historical data is used to predict future values. But from Figure 3 it is clear that the distant past isn't much good at predicting the next 5-10 years. It is also clear that the segment of this series that may be useful for prediction is the post-2012 data (but see the [Appendix](#appendix) for a prediction using the whole series)
- The decomposed data shows a predictable seasonality and trend component (Figure 6), so complex modeling doesn't seem necessary. Holt-Winters exponential smoothing or ARIMA should work just fine for forecasting. Nevertheless, I'm using 3 different methods to compare: ETS, HW exponential smoothing and ARIMA. There is plenty of literature on the internet about these forecasting methods, so I'm not going to discuss them ([and there is another good reason for not discussing theories!](https://data2decision.net/2018/11/16/learning-data-science-by-examples-rather-than-theories/)).
### 4.2.1 Input data & Decomposition
```
options(repr.plot.width = 6, repr.plot.height = 3)
# slicing 2012-2018 data as predictor series
onwards2012 = window(series, start=c(2012,1), end=c(2018,9))
autoplot(onwards2012) + labs(caption="Figure 5")+ xlab("Time") + ylab("New home sales '000")+
ggtitle(" Figure 5: Predictor series 2012-2018")+
theme(plot.title = element_text(size=8))
# decomposition
options(repr.plot.width = 6, repr.plot.height = 3)
autoplot(decompose(onwards2012)) + ggtitle("Figure 6: Decomposition of the series")+
theme(plot.title = element_text(size=8))
```
### 4.2.2 Forecasting with HW Exponential Smoothing
```
# model
forecast_hw=hw(onwards2012, seasonal="multiplicative", h=63)
options(repr.plot.width = 10, repr.plot.height = 3)
# plot
autoplot(series, series = " 1963-2011 series")+
autolayer(onwards2012, series = "Predictor series")+
autolayer(forecast_hw, series="Holt-Winter forecast")+
xlab("Time") + ylab("New home sales '000")+
ggtitle("Figure 7: HW Exponential Smoothing")+
theme(plot.title = element_text(size=8))
# point forecast for 2023 annual sales of new homes
forecast2023hw=tail(forecast_hw$mean, n=12)
forecast2023hw = sum(forecast2023hw)
round(forecast2023hw)
# Diagnostics/accuracy test
accuracy(forecast_hw)
# model description
forecast_hw['model']
```
### 4.2.3 Forecasting with ETS method
```
# model
forecast_ets = forecast(onwards2012, h=63)
options(repr.plot.width = 10, repr.plot.height = 3)
# plot
autoplot(series, series=" 1963-2011 series")+
autolayer(forecast_ets, series=" ETS forecast")+
autolayer(onwards2012, series=" Predictor series")+
ggtitle(" Figure 8: ETS forecasting")+
theme(plot.title = element_text(size=8))
# point forecast
forecast2023ets=tail(forecast_ets$mean, n=12)
forecast2023ets = sum(forecast2023ets)
round(forecast2023ets)
# model diagnostics
accuracy(forecast_ets)
# model description
forecast_ets['model']
```
### 4.2.4 Forecasting with ARIMA
```
# model
fit.arima = auto.arima(onwards2012, seasonal=TRUE, stepwise = FALSE, approximation = FALSE)
forecast_arima = forecast(fit.arima, h=63)
options(repr.plot.width = 10, repr.plot.height = 3)
# plot
autoplot(series, series=" 1963-2011 series")+
autolayer(onwards2012, series=" Input series")+
autolayer(forecast_arima, series=" ARIMA Forecast")+
ggtitle(" Figure 9: ARIMA forecasting")+
theme(plot.title = element_text(size=8))
# point forecast
forecast2023arima=tail(forecast_arima$mean, n=12)
forecast2023arima = sum(forecast2023arima)
cat("New house sold in 2023 ('000): ", round(forecast2023arima))
print('')
# current
cat(" Current value ('000): ", sum(tail(onwards2012, n=12)))
# diagnostics
accuracy(fit.arima)
# AIC
fit.arima$aic
```
## 5. Model evaluation and Conclusion
**Model comparison**
- AIC
- HW: 539.6391
- ETS: 534.3067
- ARIMA: 364.97
- MAE
- HW: 1.989132
- ETS: 1.880336
- ARIMA: 1.896309
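For reference, the MAE reported by `accuracy()` on the training fit is the mean absolute error of the one-step residuals,

$$\mathrm{MAE} = \frac{1}{n}\sum_{t=1}^{n} \left| y_t - \hat{y}_t \right|,$$

while AIC also penalizes model complexity, which is why the two criteria need not rank the models the same way.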
**Main conclusions**
- New home sales saw no sustained growth from the 1960s for roughly 30 years, until 1990. Sales then climbed until 2005, when the market began to collapse.
- New home sales declined by 75% in the 5 years between 2005 and 2010.
- Sales have been recovering since 2012, yet remain far below pre-collapse levels.
- Current sales are about 630k new homes per year.
- The 5-year forecast to 2023 puts annual home sales at about 870k, a total growth of about 40% (roughly 7% per year). This is a business-as-usual scenario, i.e., it assumes everything continues as is.
- The projected growth is still not even close to the pre-2005 level (>1,200k/year). At the current trend it could take until about 2035 to catch up to the 2005 level.
### Discussion
As revealed in [the report that came out this week](https://www.census.gov/construction/nrs/pdf/newressales.pdf), and also reported in the [Wall Street Journal](https://www.wsj.com/articles/new-home-sales-fell-8-9-in-october-1543417455), the seasonally adjusted rate of new home sales declined by 8.9% in October, signaling a market slowdown. Some market analysts expect this to continue, predicting that the post-2012 boom may be over. It will take a couple of years of data to understand the trend before we can say with greater certainty what the future holds for this important market segment.
**Acknowledgements:** The `forecast` package made life so much easier. Thanks to Rob J Hyndman and collaborators for the great work, and for graciously making the book "Forecasting: Principles and Practice" open access.
<a name="appendix"></a>
#### Appendix: Alternative forecasting with the whole series (1963-2018).
```
options(repr.plot.width = 10, repr.plot.height = 3)
hw_series=hw(series, seasonal="multiplicative", h=63)
autoplot(series, series=" Predictor series 1963-2018")+
autolayer(hw_series, series=" HW Forecast ")+ylab("New home sales thousands")
```
```
import os
import trimesh
import numpy as np
import glob
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import kaolin as kal
import kaolin.ops.conversions as tfs
from kaolin.io.modelnet import ModelNet
from kaolin.ops.gcn import GraphConv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
#Downloading dataset
directory = tf.keras.utils.get_file(
"modelnet.zip",
"http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip",
extract=True
)
directory = os.path.join(os.path.dirname(directory), "ModelNet10")
#Data observation
data1 = trimesh.load(os.path.join(directory, "monitor/train/monitor_0005.off"))
data1.show()
#Data observation
data2 = trimesh.load(os.path.join(directory, "sofa/train/sofa_0005.off"))
data2.show()
#Data observation
data3 = trimesh.load(os.path.join(directory, "bathtub/train/bathtub_0005.off"))
data3.show()
#Useful Parameters
data_path = os.path.join(os.path.dirname(directory), "ModelNet10")
obj_categories = ['monitor', 'sofa', 'bed','bathtub', 'table']
number_of_points = 2048
number_of_batch_size = 32
rate = 0.001
total_epochs = 20
training_loader = DataLoader(ModelNet(data_path,
categories=obj_categories,
split='train'),
batch_size=number_of_batch_size,
shuffle=True)
validating_loader = DataLoader(ModelNet(data_path,
categories=obj_categories,
split='test'),
batch_size=number_of_batch_size)
#Building the model
model = GraphConv(len(obj_categories), 5)
opt = torch.optim.Adam(model.parameters(), lr=rate)
entropy_loss = torch.nn.CrossEntropyLoss()
#Training the model
for epoch in range(total_epochs):
    print(f'\nEpoch: {epoch}\n')  # f-string so the epoch number is actually printed
training_loss = 0.
training_accuracy = 0.
model.train()
for index, (value, dicts) in enumerate(tqdm(training_loader)):
parti_category = dicts['category']
prediction = model(value)
        loss = entropy_loss(prediction, parti_category.view(-1))  # use the CrossEntropyLoss defined above (the original called an undefined `criterion`)
training_loss += loss.item()
loss.backward()
opt.step()
opt.zero_grad()
#Calculating accuracy
lbl_prediction = torch.argmax(prediction, dim=1)
training_accuracy += torch.mean((lbl_prediction == parti_category.view(-1)).float()).item()
    # Displaying training loss and accuracy
print('Training loss:', training_loss / len(training_loader))
print('Training accuracy:', training_accuracy / len(training_loader))
validation_loss = 0.
validation_accuracy = 0.
model.eval()
with torch.no_grad():
for index, (value, dicts) in enumerate(tqdm(validating_loader)):
parti_category = dicts['category']
prediction = model(value)
            loss = entropy_loss(prediction, parti_category.view(-1))  # same fix as in the training loop
validation_loss += loss.item()
#Calculating accuracy
lbl_prediction = torch.argmax(prediction, dim=1)
validation_accuracy += torch.mean((lbl_prediction == parti_category.view(-1)).float()).item()
#Displaying validation loss and accuracy
print('Validation loss:', validation_loss / len(validating_loader))
print('Validation accuracy:', validation_accuracy / len(validating_loader))
#Evaluating the trained model on test datasets
testing_loader = DataLoader(ModelNet(data_path,
categories=obj_categories,
split='test'),
shuffle=True,
batch_size=number_of_batch_size)
value, dicts = next(iter(testing_loader))
parti_category = dicts['category']
prediction = model(value)
lbl_prediction = torch.max(prediction, axis=1)[1]
#Displaying results
testIndex = 0 #We can enter the test index from 0..<number_of_batch_size
lbl = obj_categories[parti_category[testIndex].item()]
pred = obj_categories[lbl_prediction[testIndex]]
fig = plt.figure()
sub_plot = fig.add_subplot(1, 1, 1, projection='3d')
pointclouds = value  # assumption: plot the batch returned by the loader as point clouds (the original `pointclouds` was undefined)
for index, point in enumerate(pointclouds):
color = 'g'
if pred == lbl:
color = 'g'
else:
color = 'r'
sub_plot.scatter(point[:, 0], point[:, 1], point[:, 2], c=color, s=3)
sub_plot.set_title('Original Image: {0}\nPredicted Image: {1}'.format(lbl, pred))
sub_plot.axis('off')
plt.show()
```
<a href="https://colab.research.google.com/github/Deepu-Pk/AI-Lab6/blob/main/4_Queen.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from queue import Queue
#Function for solving 4-queen using DFS algorithm
def DFS():
if size < 1:
return []
solutions = []
stack = [[]]
while stack:
solution = stack.pop()
if conflict(solution):
continue
row = len(solution)
if row == size:
solutions.append(solution)
continue
for col in range(size):
queen = (row, col)
queens = solution.copy()
queens.append(queen)
stack.append(queens)
return solutions
#Function for solving 4-queen using BFS algorithm
def BFS():
if size < 1:
return []
solutions = []
queue = Queue()
queue.put([])
while not queue.empty():
solution = queue.get()
if conflict(solution):
continue
row = len(solution)
if row == size:
solutions.append(solution)
continue
for col in range(size):
queen = (row, col)
queens = solution.copy()
queens.append(queen)
queue.put(queens)
return solutions
#Function for checking conflicts between queens
def conflict(queens):
for i in range(1, len(queens)):
for j in range(0, i):
a, b = queens[i]
c, d = queens[j]
if a == c or b == d or abs(a - c) == abs(b - d):
return True
return False
#Function for printing solution
def display(queens):
for i in range(size):
print(' ---' * size)
for j in range(size):
p = 'Q' if (i, j) in queens else ' '
print(' %s ' % p, end='')
print(' ')
print(' ---' * size)
size = 4  # Initializing size = 4 (4-queen problem)
dfs_solutions = DFS()  # Calling the DFS solver
bfs_solutions = BFS()  # Calling the BFS solver
print("DFS Solution :")
# Calling the display function to print each solution
for i,solution in enumerate(dfs_solutions):
print('Solution %d:' % (i + 1))
display(solution)
print()
print("BFS Solution : ")
for i, solution in enumerate(bfs_solutions):
print('Solution %d:' % (i + 1))
display(solution)
```
```
import numpy as np
import pandas as pd
df = pd.read_csv("data.csv")
import math
class Node:
def __init__(self):
self._is_result_node = False
self.childrens = []
    def get_child_node(self, value):
        # filter() is lazy in Python 3 and the lambda should use its own argument (the original referenced an undefined `child`)
        return next(filter(lambda child: child.condition == value, self.childrens), None)
def add_child_node(self, ):
pass
def is_result_node(self):
return self._is_result_node
class Tree:
def __init__(self):
self.childs = []
def build(self, D):
return self.childs
df
group1 = df.groupby('class:buys_computer')
print(group1)
str(group1)
total = len(df)
print(total)
df.groupby("class:buys_computer")
# print([len(x[1]) for x in group1])
def getInfo(D):
    # entropy Info(D) of the class distribution (same as the helper defined later inside attribute_selection_method)
    group = D.groupby('class:buys_computer')
    total_D = len(D)
    return -sum(len(x[1])/total_D*math.log2(len(x[1])/total_D) for x in group)
InfoD = getInfo(df)
print(InfoD)
import uuid
InfoageD = 0
for x in group1:
column_name = x[0]
cdf = x[1]
property_size = len(cdf)
print(property_size)
print(column_name)
print(cdf)
InfoageD += property_size/total * getInfo(cdf)
print(InfoageD)
gain = InfoD - InfoageD
print(gain)
# from treelib import Node, Tree  # garbled/unused import in the original: Edge, Node and Tree are defined below
class Edge:
def __init__(self):
super().__init__()
self.source = None,
self.target = None
self.value = None
def get_json_data(self):
return {
"id": self.source.id + ":" + self.target.id,
"source": self.source.id,
"target": self.target.id,
"value": self.value
}
class Node:
def __init__(self):
super().__init__()
self.value = None
self.id = str(uuid.uuid1())
# def append_child(self, child):
# self.childs.append(child)
# def get_json_data(self):
# if self.childs:
# return {
# "_name": self.value,
# "children": [child.get_json_data() for child in self.childs]
# }
# else:
# return {
# "_name": self.value,
# "is_leaf": True
# }
def __str__(self):
u = (self.label, self.value)
return str(u)
class Tree:
def __init__(self):
super().__init__()
self.edges = {
}
self.root_id = "id"
self.nodes = {
}
def _get_child_node_json_data(self, node_id):
if node_id not in self.edges:
return {
"id": node_id,
"value": self.nodes[node_id].value,
"is_leaf": True,
}
else:
return {
"id": node_id,
"value": self.nodes[node_id].value,
"children": [self._get_child_node_json_data(edge.target.id) for edge in self.edges[node_id]]
}
def get_node_json_data(self):
return self._get_child_node_json_data(self.root_id)
def get_edge_json_data(self):
return [ edge.get_json_data() for node_edges in self.edges.values() for edge in node_edges]
def get_json_data(self):
return {
"nodes": self.get_node_json_data(),
"edges": self.get_edge_json_data(),
}
def create_node(self):
return Node()
def add_node(self, node):
if not self.nodes:
self.root_id = node.id
self.nodes[node.id] = node
def add_edge(self, par_node, node, edge_value):
par_node_id = par_node.id
if par_node_id not in self.edges:
self.edges[par_node_id] = []
edge = Edge()
edge.source = par_node
edge.target = node
edge.value = edge_value
self.edges[par_node_id].append(edge)
def attribute_selection_method(D, attribute_list):
def getInfo(D):
group = D.groupby('class:buys_computer')
total = len(D)
# print([len(x[1]) for x in group1])
InfoD = -sum([len(x[1])/total*math.log2(len(x[1])/total) for x in group])
return InfoD
candidate_splitting_criterion = []
for attribute in attribute_list:
group1 = D.groupby(attribute)
InfoageD = 0
for x in group1:
column_name = x[0]
cdf = x[1]
property_size = len(cdf)
# print(property_size)
# print(column_name)
# print(cdf)
            InfoageD += property_size/len(D) * getInfo(cdf)  # weight by the size of the current partition D (not the global total)
        # print(InfoageD)
        gain = getInfo(D) - InfoageD  # gain relative to the entropy of D itself
candidate_splitting_criterion.append((gain, attribute))
print(gain, attribute)
splitting_criterion = max(candidate_splitting_criterion, key=lambda item: item[0])[1]
print("chose attribute", splitting_criterion)
return splitting_criterion
def is_both_same_class(D):
group_size = D.groupby("class:buys_computer").size()
return (True, group_size.idxmax()) if group_size.size == 1 else (False, None)
def is_discrete(t):
return True
def get_part(D, splitting_criterion):
return D.groupby(splitting_criterion)
def get_majority_class(D):
return str(D.groupby("class:buys_computer").size().idxmax())
tree = Tree()
def generate_decision_tree(D, attribute_list):
N = tree.create_node()
tree.add_node(N)
is_same_class, C = is_both_same_class(D)
print(attribute_list)
print(D)
if is_same_class:
N.is_leaf = True
N.value = str(C)
print(type(N.value), N.value)
        return N  # return N as a leaf node, labeled with class C
if not attribute_list:
N.is_leaf = True
N.value = str(get_majority_class(D))
print(type(N.value), N.value)
        return N  # return a leaf node labeled with the majority class
splitting_criterion = attribute_selection_method(D, attribute_list)
N.value = str(splitting_criterion)
# print(splitting_criterion)
if is_discrete(splitting_criterion):
attribute_list.remove(splitting_criterion)
# print(attribute_list)
for (label, Dj) in get_part(D, splitting_criterion):
if Dj.empty:
            value = get_majority_class(D)  # why is this partition ever empty????
else:
child_N = generate_decision_tree(Dj, attribute_list)
child_N.label = label
tree.add_edge(N, child_N, label)
return N
N = generate_decision_tree(df, list(df.columns[1:-1]))
print(tree)
tree.get_json_data()
import json
data = tree.get_json_data()
print(data)
json.dumps(data)
```
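For reference, the quantities computed by `getInfo` and by the gain loop in `attribute_selection_method` correspond to the standard ID3 information-gain criterion:

$$\mathrm{Info}(D) = -\sum_{i=1}^{m} p_i \log_2 p_i, \qquad \mathrm{Info}_A(D) = \sum_{j=1}^{v} \frac{|D_j|}{|D|}\,\mathrm{Info}(D_j), \qquad \mathrm{Gain}(A) = \mathrm{Info}(D) - \mathrm{Info}_A(D),$$

where $p_i$ is the fraction of records in $D$ belonging to class $i$ and $D_1,\dots,D_v$ are the partitions of $D$ induced by attribute $A$; the attribute with the largest gain is chosen as the splitting criterion.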
# Converting Institution data to AEZs
The processing steps are run on the data as it comes from the institutions, but now we can take that and turn it into AEZ regions to test how well our method does based on regional differences.
Start by importing what you need.
```
import numpy as np
import xarray as xr
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
```
The plan is to read in the institution data as geopandas tables, combine them into one big table, and then make cuts to that big table based on the shapefiles of the AEZs.
```
# read in the institution files, preferably the ones that have had columns dropped already in the processing step
AGRYHMET = pd.read_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Institutions/AGRYHMET_wofs_ls_valid.csv')
RCMRD = pd.read_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Institutions/RCMRD_wofs_ls_valid.csv')
OSS = pd.read_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Institutions/OSS_wofs_ls_valid.csv')
AFRIGIST = pd.read_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Institutions/AFRIGIST_wofs_ls_valid.csv')
# concatenate the institution data into a big table and check it
institutions = [AGRYHMET, RCMRD, OSS, AFRIGIST]
combined = pd.concat(institutions)
combined
# save the combined table as a csv
combined.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Africa_Combined_wofs_ls_valid.csv')
```
Now that they are all combined into one table, we need to turn it into geopandas to be able to compare it with the shape files...
```
# make a geopandas object
geo_combined = gpd.GeoDataFrame(combined, geometry=gpd.points_from_xy(combined.LON, combined.LAT), crs='EPSG:4326')
geo_combined = geo_combined.to_crs('EPSG:6933')
# save as a shape file for later
geo_combined.to_file('../Results/WOfS_Assessment/wofs_ls/Point_Based/Africa_Combined_wofs_ls_valid.shp')
#check the crs
geo_combined.crs
# read in the shape file, and set the co-ordinates
combined_shp = gpd.read_file('../Results/WOfS_Assessment/wofs_ls/Point_Based/Africa_Combined_wofs_ls_valid.shp')  # same path the shapefile was saved to above
combined_shp.crs
```
Now that it's a geopandas object we should be able to use a shape file to clip it?
```
shp_east = gpd.read_file('../Data/AEZs/Eastern.shp')
shp_east.crs
#check that the coordinates match and set them to be the same
shp_east = shp_east.to_crs('EPSG:6933')
shp_east.plot()
geo_combined.plot()
#east_wofs = gpd.sjoin(combined_shp.to_crs(shp_east.crs), shp_east, op='within')
east_wofs = gpd.sjoin(geo_combined, shp_east, op='within')
# check that the shape looks right
east_wofs.plot()
#check the table
east_wofs
# save out to file for the accuracy assessments
east_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Eastern_wofs_ls_valid.csv')
# and then repeat for the other regions
shp_north = gpd.read_file('../Data/AEZs/Northern.shp')
shp_north = shp_north.to_crs('EPSG:6933')
north_wofs = gpd.sjoin(geo_combined, shp_north, op='within')
# check that the shape looks right
north_wofs.plot()
#check the table
north_wofs
# save out to file for the accuracy assessments
north_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Northern_wofs_ls_valid.csv')
# and then repeat for the other regions
shp_south = gpd.read_file('../Data/AEZs/Southern.shp')
shp_south = shp_south.to_crs('EPSG:6933')
south_wofs = gpd.sjoin(geo_combined, shp_south, op='within')
# save out to file for the accuracy assessments
south_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Southern_wofs_ls_valid.csv')
# and then repeat for the other regions
shp_sahel = gpd.read_file('../Data/AEZs/Sahel.shp')
shp_sahel = shp_sahel.to_crs('EPSG:6933')
sahel_wofs = gpd.sjoin(geo_combined, shp_sahel, op='within')
# save out to file for the accuracy assessments
sahel_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Sahel_wofs_ls_valid.csv')
# and then repeat for the other regions
shp_west = gpd.read_file('../Data/AEZs/Western.shp')
shp_west = shp_west.to_crs('EPSG:6933')
west_wofs = gpd.sjoin(geo_combined, shp_west, op='within')
# save out to file for the accuracy assessments
west_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Western_wofs_ls_valid.csv')
# and then repeat for the other regions
shp_central = gpd.read_file('../Data/AEZs/Central.shp')
shp_central = shp_central.to_crs('EPSG:6933')
central_wofs = gpd.sjoin(geo_combined, shp_central, op='within')
# save out to file for the accuracy assessments
central_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Central_wofs_ls_valid.csv')
# and then repeat for the other regions
shp_io = gpd.read_file('../Data/AEZs/Indian_ocean.shp')
shp_io = shp_io.to_crs('EPSG:6933')
io_wofs = gpd.sjoin(geo_combined, shp_io, op='within')
# save out to file for the accuracy assessments
io_wofs.to_csv('../Results/WOfS_Assessment/wofs_ls/Point_Based/Indian_ocean_wofs_ls_valid.csv')
io_wofs.plot()
```
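The per-region cells above all repeat the same read, reproject, spatial-join and save pattern. As a sketch (using the same paths, CRS and `sjoin` call as above, with the region names taken from the shapefiles already listed), a single loop could produce all of the regional CSVs:

```
# assumes `geo_combined` from above is in memory
regions = ['Eastern', 'Northern', 'Southern', 'Sahel', 'Western', 'Central', 'Indian_ocean']
for region in regions:
    # read the AEZ boundary and match the CRS of the combined points
    shp = gpd.read_file(f'../Data/AEZs/{region}.shp').to_crs('EPSG:6933')
    # keep only the points that fall within this AEZ
    region_wofs = gpd.sjoin(geo_combined, shp, op='within')
    region_wofs.to_csv(f'../Results/WOfS_Assessment/wofs_ls/Point_Based/{region}_wofs_ls_valid.csv')
```

Note that `op='within'` matches the calls above; newer geopandas releases spell this argument `predicate=`.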
# T1217 - Browser Bookmark Discovery
Adversaries may enumerate browser bookmarks to learn more about compromised hosts. Browser bookmarks may reveal personal information about users (ex: banking sites, interests, social media, etc.) as well as details about internal network resources such as servers, tools/dashboards, or other related infrastructure.
Browser bookmarks may also highlight additional targets after an adversary has access to valid credentials, especially [Credentials In Files](https://attack.mitre.org/techniques/T1552/001) associated with logins cached by a browser.
Specific storage locations vary based on platform and/or application, but browser bookmarks are typically stored in local files/databases.
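As a rough, hypothetical illustration (not one of the atomic tests below), a short Python sketch that enumerates typical per-user bookmark stores might look like this; the glob patterns are assumptions based on default install locations:

```python
import glob
import os

home = os.path.expanduser("~")
# Typical per-user bookmark stores (patterns are assumptions, not an exhaustive list)
patterns = [
    os.path.join(home, ".mozilla/firefox/*/places.sqlite"),                              # Firefox, Linux
    os.path.join(home, "Library/Application Support/Firefox/Profiles/*/places.sqlite"),  # Firefox, macOS
    os.path.join(home, "Library/Application Support/Google/Chrome/*/Bookmarks"),         # Chrome, macOS
    os.path.join(home, "AppData/Local/Google/Chrome/User Data/*/Bookmarks"),             # Chrome, Windows
]
for pattern in patterns:
    for path in glob.glob(pattern):
        print(path)
```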
## Atomic Tests
```
#Import the Module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 -Force
```
### Atomic Test #1 - List Mozilla Firefox Bookmark Database Files on Linux
Searches for Mozilla Firefox's places.sqlite file (on Linux distributions) that contains bookmarks and lists any found instances to a text file.
**Supported Platforms:** linux
#### Attack Commands: Run with `sh`
```sh
find / -path "*.mozilla/firefox/*/places.sqlite" 2>/dev/null -exec echo {} >> /tmp/T1217-Firefox.txt \;
cat /tmp/T1217-Firefox.txt 2>/dev/null
```
```
Invoke-AtomicTest T1217 -TestNumbers 1
```
### Atomic Test #2 - List Mozilla Firefox Bookmark Database Files on macOS
Searches for Mozilla Firefox's places.sqlite file (on macOS) that contains bookmarks and lists any found instances to a text file.
**Supported Platforms:** macos
#### Attack Commands: Run with `sh`
```sh
find / -path "*/Firefox/Profiles/*/places.sqlite" -exec echo {} >> /tmp/T1217_Firefox.txt \;
cat /tmp/T1217_Firefox.txt 2>/dev/null
```
```
Invoke-AtomicTest T1217 -TestNumbers 2
```
### Atomic Test #3 - List Google Chrome Bookmark JSON Files on macOS
Searches for Google Chrome's Bookmark file (on macOS) that contains bookmarks in JSON format and lists any found instances to a text file.
**Supported Platforms:** macos
#### Attack Commands: Run with `sh`
```sh
find / -path "*/Google/Chrome/*/Bookmarks" -exec echo {} >> /tmp/T1217-Chrome.txt \;
cat /tmp/T1217-Chrome.txt 2>/dev/null
```
```
Invoke-AtomicTest T1217 -TestNumbers 3
```
### Atomic Test #4 - List Google Chrome Bookmarks on Windows with powershell
Searches for Google Chromes's Bookmarks file (on Windows distributions) that contains bookmarks.
Upon execution, paths that contain bookmark files will be displayed.
**Supported Platforms:** windows
#### Attack Commands: Run with `powershell`
```powershell
Get-ChildItem -Path C:\Users\ -Filter Bookmarks -Recurse -ErrorAction SilentlyContinue -Force
```
```
Invoke-AtomicTest T1217 -TestNumbers 4
```
### Atomic Test #5 - List Google Chrome / Edge Chromium Bookmarks on Windows with command prompt
Searches for Google Chromes's and Edge Chromium's Bookmarks file (on Windows distributions) that contains bookmarks.
Upon execution, paths that contain bookmark files will be displayed.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
where /R C:\Users\ Bookmarks
```
```
Invoke-AtomicTest T1217 -TestNumbers 5
```
### Atomic Test #6 - List Mozilla Firefox bookmarks on Windows with command prompt
Searches for Mozilla Firefox bookmarks file (on Windows distributions) that contains bookmarks in a SQLITE database.
Upon execution, paths that contain bookmark files will be displayed.
**Supported Platforms:** windows
#### Attack Commands: Run with `command_prompt`
```command_prompt
where /R C:\Users\ places.sqlite
```
```
Invoke-AtomicTest T1217 -TestNumbers 6
```
## Detection
Monitor processes and command-line arguments for actions that could be taken to gather browser bookmark information. Remote access tools with built-in features may interact directly using APIs to gather information. Information may also be acquired through system management tools such as [Windows Management Instrumentation](https://attack.mitre.org/techniques/T1047) and [PowerShell](https://attack.mitre.org/techniques/T1059/001).
System and network discovery techniques normally occur throughout an operation as an adversary learns the environment. Data and events should not be viewed in isolation, but as part of a chain of behavior that could lead to other activities, such as Collection and Exfiltration, based on the information obtained.
## Shield Active Defense
### Decoy Content
Seed content that can be used to lead an adversary in a specific direction, entice a behavior, etc.
Decoy Content is the data used to tell a story to an adversary. This content can be legitimate or synthetic data which is used to reinforce or validate your defensive strategy. Examples of decoy content are files on a storage object, entries in the system registry, system shortcuts, etc.
#### Opportunity
There is an opportunity to feed content to an adversary to influence their behaviors, test their interest in specific topics, or add legitimacy to a system or environment.
#### Use Case
A defender can use decoy content to give the false impression about the nature of the system in order to entice an adversary to continue engagement.
#### Procedures
Create directories and files with names and contents using key words that may be relevant to an adversary to see if they examine or exfiltrate the data.
Seed a file system with content that is of no value to the company but reinforces the legitimacy of the system if viewed by an adversary.
# Finite difference method: Convection
We are going to solve the convection equation:
$$\frac{\partial u}{\partial t} + c \frac{\partial u}{\partial x} = 0$$
## Setup
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
The cells below define functions that create the domain and the initial conditions.
```
def cria_dominios(tamanho, Nx, duração, Nt):
"""
Cria o domínio espacial e calcula os intervalos de tempo e espaço.
"""
x = np.linspace(0, tamanho, Nx)
dx = x[1] - x[0]
dt = duração/(Nt - 1)
return x, dx, dt
x, dx, dt = cria_dominios(tamanho=2, Nx=51, duração=1, Nt=51)
print('dx =', dx, 'dt =', dt)
def cria_cond_inicial(x):
"""
Cria um vetor de condições iniciais u0 com uma função degrau.
"""
u0 = np.ones(x.size)
u0[(x >= 0.2) & (x <= 0.5)] = 2
return u0
u0 = cria_cond_inicial(x)
plt.figure()
plt.plot(x, u0, '.-k')
plt.xlabel('x')
plt.ylabel('u')
plt.title('u0')
plt.ylim(0, 3)
```
## Task 1
Complete the function below, which executes a single time step using forward differences in time and backward differences in space.
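As a reminder, this is the standard first-order upwind discretization that the function below implements: each interior grid point is updated as

$$u_k^{n+1} = u_k^n - c\,\frac{\Delta t}{\Delta x}\left(u_k^n - u_{k-1}^n\right),$$

with the boundary value $u_0^{n+1} = 1$ left unchanged.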
```
def passo_no_tempo(u_passado, dx, dt, velocidade):
    """
    Executes 1 time step.
    Given u_passado, uses the finite difference method
    to compute u_futuro with time step dt.
    Uses the boundary condition u(x=0, t) = 1.
    """
    u_futuro = u_passado.copy()
    Nx = len(u_passado)
    for k in range(1, Nx):
        # upwind update; uses `velocidade` (the original hard-coded the speed as 1)
        u_futuro[k] = u_passado[k] - velocidade * dt/dx * (u_passado[k] - u_passado[k-1])
    return u_futuro
```
Use the cells below to check that your function works.
```
u1 = passo_no_tempo(u0, dx, dt, velocidade=1)
plt.figure()
plt.plot(x, u0, '--r')
plt.plot(x, u1, '.-k')
plt.xlabel('x')
plt.ylabel('u')
plt.ylim(0, 3)
```
## Task 2
Complete the function below, which runs a full finite-difference simulation (using the functions defined above) for a given duration.
```
def simula(tamanho, Nx, duração, Nt, velocidade):
    """
    Runs a full simulation of the convection equation
    using finite differences.
    1. Creates the domain and the initial condition
    2. Executes Nt time steps
    3. Returns the domain (x), the initial condition (u0) and
       the final result of the simulation (u_futuro).
    """
    x, dx, dt = cria_dominios(tamanho, Nx, duração, Nt)
    u0 = cria_cond_inicial(x)
    u_passado = u0
    for t in range(1, Nt):
        u_futuro = passo_no_tempo(u_passado, dx, dt, velocidade)
        u_passado = u_futuro  # advance in time: the new state feeds the next step (missing in the original)
    return x, u0, u_futuro
```
Use the cells below to check the result of your function.
```
x, u0, u_futuro = simula(tamanho=2, Nx=51, duração=1, Nt=51, velocidade=1)
plt.figure()
plt.plot(x, u0, '--r')
plt.plot(x, u_futuro, '.-k')
plt.xlabel('x')
plt.ylabel('u')
plt.ylim(0, 3)
```
### What happened to the result at the end of the simulation? Should this happen?
## Task 3
Make a figure with the simulation result for different values of `Nx` (use the list below). Include a legend in your plot.
```
valores_de_Nx = [51, 71, 91, 101, 111]
for valores in valores_de_Nx:
x, u0, u_futuro = simula(tamanho=2, Nx = valores, duração=1, Nt=51, velocidade=1)
plt.figure()
plt.title("Nx = " + str(valores))
plt.plot(x, u0, '--r')
plt.plot(x, u_futuro, '.-k')
plt.xlabel('x')
plt.ylabel('u')
plt.ylim(0, 3)
```
### Is the method equally accurate for all values of Nx?
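A background note (not part of the original assignment): the explicit upwind scheme above is stable only when the Courant (CFL) number satisfies

$$C = \frac{c\,\Delta t}{\Delta x} \leq 1.$$

Refining the grid (larger `Nx`, hence smaller $\Delta x$) while keeping `Nt` fixed increases $C$; for the largest values of `Nx` in the list above, $C$ exceeds 1 and the solution develops spurious oscillations.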
## Bonus
Complete the function below, which runs the full simulation but this time stores every step of the simulation. The function must produce a list `u` containing the value of $u$ at each iteration.
Complete the code that generates a plot with the value of `u` every 10 iterations. That is, the plot should contain `u[0]`, `u[10]`, `u[20]`, etc.
```
def simula_grava(tamanho, Nx, duração, Nt, velocidade):
    """
    Runs a full simulation of the convection equation
    using finite differences and records every iteration.
    1. Creates the domain and the initial condition
    2. Executes Nt time steps
    3. Returns the domain (x) and a list (u) with the result
       of every time step (including u0).
    """
    # One possible completion of the bonus exercise:
    x, dx, dt = cria_dominios(tamanho, Nx, duração, Nt)
    u0 = cria_cond_inicial(x)
    u = [u0]
    u_passado = u0
    for t in range(1, Nt):
        u_futuro = passo_no_tempo(u_passado, dx, dt, velocidade)
        u.append(u_futuro)
        u_passado = u_futuro
    return x, u
x, u = simula_grava(tamanho=2, Nx=51, duração=1, Nt=51, velocidade=1)
plt.figure()
# plot every 10th stored iteration
for i in range(0, len(u), 10):
    plt.plot(x, u[i], label='u[{}]'.format(i))
plt.legend(loc='best')
plt.xlabel('x')
plt.ylabel('u')
plt.ylim(0, 3)
plt.title('Full simulation (every 10 iterations)')
```
**Course website**: https://github.com/mat-esp/about
**Note**: This notebook is part of the course "Matemática Especial I" of the [Universidade do Estado do Rio de Janeiro](http://www.uerj.br/). All content can be freely used and adapted under the terms of the
[Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/).

# Plotting and formatting graphs
## Objectives
- Plot graphs of functions of one variable;
- Understand the basic workings of the _artist_ class (_axes_, _figures_, _subplots_);
- Change properties of lines, markers and legends;
- Insert _labels_, titles and simple annotations;
## Introduction to data visualization
- Representing data and information graphically through visual elements
    - Tables, charts, maps and infographics
- Many tools available
- In the context of data analysis:
    - business reports
    - dashboards
    - multidimensional charts
## _Data storytelling_
- The process of "telling stories through data".
> We are not inherently good at "telling a story" through data. (Cole Knaflic)
## Mathematical plotting
- _Plotting_: the term commonly used for sketching graphs of mathematical functions with a computer.
- In this lesson: an overview of plotting mathematical functions using *numpy*
## The *matplotlib* library
- The best-known Python library for 2D plotting
- Philosophy: create plots with just a few commands, or even a single one.
- Created by John Hunter (_in memoriam_)
- He wanted:
    - beautiful, publication-quality plots
    - the ability to embed plots in graphical interfaces for application development
    - code that is easy to understand and work with
*matplotlib* is a codebase divided into three parts:
1. The *pylab* interface: a set of predefined functions in the `matplotlib.pyplot` submodule.
2. The *frontend*: a set of classes responsible for creating figures, text, lines, plots, etc.
3. The *backend*: a set of renderers responsible for converting the plots to devices where they can actually be displayed.
Example:
- The PS *backend* renders PostScript
- The SVG *backend* builds scalable vector graphics
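As a small illustration (not part of the original lesson), you can check which backend is active in the current session:

```
import matplotlib

# name of the backend currently in use (for example, the inline backend inside Jupyter)
print(matplotlib.get_backend())
```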
### Interactive *matplotlib* sessions
- In consoles, use `%matplotlib`;
- In a Jupyter notebook, use `%matplotlib inline`.
```
# chamada padrão
%matplotlib inline
import matplotlib.pyplot as plt
```
## Creating simple plots
Let's import *numpy* so we can take advantage of vectorized computation and plot our first examples.
```
import numpy as np
x = np.linspace(-5,5,20) # domínio
y = x**2 # imagem
plt.plot(x,y); # reta y = x
```
**Example:** plot the graph of the parabola $f(x) = ax^2 + bx + c$ for arbitrary values of $a,b,c$ in the interval $-20 \leq x \leq 20$.
```
x = np.linspace(-20,20,50)
a,b,c = 2,3,4
y = a*x**2 + b*x + c # f(x)
plt.plot(x,y);
```
We can define a function to plot the parabola:
```
def plota_parabola(a,b,c):
x = np.linspace(-20,21,50)
y = a*x**2 + b*x + c
plt.plot(x,y)
```
Now we can study what each coefficient does:
```
# mude o valor de a e considere b = 2, c = 1
for a in np.linspace(-20,20,10):
plota_parabola(a,2,1)
# mude o valor de b e considere a = 2, c = 1
for b in np.linspace(-4,4,20):
plota_parabola(2,b,1)
# mude o valor de c e considere a = 2, b = 1
for c in np.linspace(-5,5,10):
plota_parabola(2,1,c) # por que você não vê muitas mudanças?
# mude o valor de a, b e c
valores = np.linspace(-3,5,5)
for a in valores:
for b in valores:
for c in valores:
plota_parabola(a,b,c)
```
**Example:** plot the graph of the function $g(t) = a\cos(bt + \pi)$ for arbitrary values of $a$ and $b$ in the interval $0 \leq t \leq 2\pi$.
```
t = np.linspace(0,2*np.pi,50,endpoint=True) # t: ângulo
a, b = 1, 1
plt.plot(t,a*np.cos(b*t + np.pi));
b = 2
plt.plot(t,a*np.cos(b*t + np.pi));
b = 3
plt.plot(t,a*np.cos(b*t + np.pi));
```
## Changing line properties and styles
Change:
- colors with `color` or `c`,
- line width with `linewidth` or `lw`
- line style with `linestyle` or `ls`
- marker symbol with `marker`
- marker edge width with `markeredgewidth` or `mew`
- marker edge color with `markeredgecolor` or `mec`
- marker face color with `markerfacecolor` or `mfc`
- transparency with `alpha` in the range [0,1]
```
g = lambda a,b: a*np.cos(b*t + np.pi) # assume t anterior
# estude cada exemplo
# a ordem do 3o. argumento em diante pode mudar
plt.plot(t,g(1,1),color='c',linewidth=5,linestyle='-.',alpha=.3)
plt.plot(t,g(1,2),c='g',ls='--',lw='.7',marker='s',mfc='y',ms=10)
plt.plot(t,g(1,3),c='#e26d5a',ls=':', marker='d',mec='k',mew=1.0);
```
Colors and line styles can be specified in a shorthand form, and in different orders, using a format specifier.
```
plt.plot(t,g(1,1),'yv') # amarelo; triângulo para baixo;
plt.plot(t,g(1,2),':c+') # pontilhado; ciano; cruz;
plt.plot(t,-g(2,2),'>-.r'); # triangulo direita; traço-ponto; vermelho;
```
### Multiple plots
The example above could be done as a multiple plot with 3 blocks of the form (`x, y, 'fmt'`), where `x` and `y` are the coordinate data and `fmt` is a format string.
```
plt.plot(t,g(1,1),'yv', t,g(1,2),':c+', t,-g(2,2),'>-.r'); # 3 blocos sequenciados
```
To see all the available line property and style options, check `plt.plot?`.
```
#plt.plot?
```
### Specifying figures
Use `plt.figure` to create a figure environment and change:
- the width and height (in inches) with `figsize = (width, height)`. The default is (6.4, 4.8).
- the resolution (in dots per inch) with `dpi`. The default is 100.
- the background color with `facecolor`. The default is `w` (white).
**Example:** Plot the graphs of $h_1(x) = a\sqrt{x}$ and $h_2(x) = be^{\frac{x}{c}}$ for free choices of a, b, c and the properties above.
```
x = np.linspace(0,10,50,endpoint=True)
h1, h2 = lambda a: a*np.sqrt(x), lambda b,c: b*np.exp(x/c)
plt.figure(figsize=(8,6), dpi=200, facecolor='#a0aeee')
plt.plot(x,h1(.9),x,h2(1,9));
```
### Changing axis limits and tick marks
Change:
- the range of the `x` axis with `xlim`
- the range of the `y` axis with `ylim`
- the tick marks of the `x` axis with `xticks`
- the tick marks of the `y` axis with `yticks`
```
plt.plot(x,h1(.9),x,h2(1,9)); plt.xlim(1.6,9.2); plt.ylim(1.0,2.8);
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi]); # lista de múltiplos de pi
plt.yticks([-1, 0, 1]); # 3 valores em y
```
### Setting tick label text
We can change the tick labels by passing descriptive text. In the previous case, something like this would be better:
```
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
# o par de $...$ formata os números na linguagem TeX
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi], ['$0$','$\pi/2$','$\pi$','$3/2\pi$','$2\pi$']);
plt.yticks([-1, 0, 1], ['$y = -1$', '$y = 0$', '$y = +1$']);
```
### Moving the main axes
The main axes can be moved to arbitrary positions, and the borders of the plotting area turned off, using `spines`.
```
# plot the function
x = np.linspace(-3,3)
plt.plot(x,x**1/2*np.sin(x)-0.5); # f(x) = (x/2)*sin(x) - 1/2 (note: x**1/2 is x/2, not sqrt(x))
ax = plt.gca() # get current axis
ax.spines['right'].set_color('none') # remove right border
ax.spines['top'].set_color('none') # remove top border
ax.spines['bottom'].set_position(('data',0)) # move the bottom spine (x axis) to y = 0
ax.spines['left'].set_position(('data',0)) # move the left spine (y axis) to x = 0
ax.xaxis.set_ticks_position('top') # move tick marks to the top
ax.yaxis.set_ticks_position('right') # move tick marks to the right
plt.xticks([-2,0,2]) # change x ticks
ax.set_xticklabels(['left','zero','right']) # change x ticklabels
plt.yticks([-0.4,0,0.4]) # change y ticks
ax.set_yticklabels(['upper','zero','lower']); # change y ticklabels
```
### Adding legends and labels
To create:
- a legend for the plotted curves, we use `legend`.
- a label for the x axis, we use `xlabel`
- a label for the y axis, we use `ylabel`
- a title for the plot, we use `title`
**Example:** plot the line $f_1(x) = x + 1$ and the line $f_2(x) = 1 - x$ and add a legend, using the colors blue and orange.
```
plt.plot(x, x + 1,'-b', label = 'y = x + 1' )
plt.plot(x, 1-x, c = [1.0,0.5,0.0], label = 'y = 1 - x'); # laranja: 100% de vermelho, 50% verde
plt.legend(loc = 'best') # 'loc=best' : melhor localização da legenda
plt.xlabel('x'); plt.ylabel('y'); plt.title('Gráfico de duas retas');
```
#### Legend placement
Use `loc=value` to specify where to place the legend. Use `plt.legend?` to check the available options for `value`. See the table of `Location String` and `Location Code` values.
```
plt.plot(np.nan,np.nan,label='upper right'); # nan : not a number
plt.legend(loc=1); # usando número
plt.plot(np.nan,np.nan,label='loc=1');
plt.legend(loc='upper right'); # usando a string correspondente
```
### Changing font sizes
To change the font size of axis labels, legends and titles, use `fontsize`.
```
plt.plot(np.nan,np.nan,label='legenda');
FSx, FSy, FSleg, FStit = 10, 20, 30, 40
plt.xlabel('Eixo x',c='b', fontsize=FSx)
plt.ylabel('Eixo y',c='g', fontsize=FSy)
plt.legend(loc='center', fontsize=FSleg);
plt.title('Título', c='c', fontsize=FStit);
```
### Simple annotations
We can add annotations to a plot with the function `annotate(text,xref,yref)`.
**Example:** generate a set of 10 random points $(x,y)$ with $0.2 < x,y < 0.8$ and annotate them in the plane.
```
# gera uma lista de 10 pontos satisfazendo a condição
P = []
while len(P) != 10:
xy = np.round(np.random.rand(2),1)
test = np.all( (xy > 0.2) & (xy < 0.8) )
if test:
P.append(tuple(xy))
# plota o plano
plt.figure(figsize=(8,8))
plt.xlim(0,1)
plt.ylim(0,1)
for ponto in P:
plt.plot(ponto[0],ponto[1],'o')
plt.annotate(f'({ponto[0]},{ponto[1]})',ponto,fontsize=14)
```
**Problem:** the code above has an issue. Check that `len(P) = 10`, and yet it does not plot the 10 points we would like to see. Figure out what is happening and propose a solution.
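A hint, as a minimal sketch of one possible fix (not the only one): because the coordinates are rounded to one decimal place, the same pair can be generated more than once, so `P` may contain repeated points that overlap in the plot. Collecting the points in a set enforces uniqueness:
```
# One possible fix (sketch): store the points in a set, so repeated pairs are discarded
P = set()
while len(P) != 10:
    xy = np.round(np.random.rand(2),1)
    if np.all( (xy > 0.2) & (xy < 0.8) ):
        P.add(tuple(xy)) # duplicates are ignored automatically
plt.figure(figsize=(8,8))
plt.xlim(0,1)
plt.ylim(0,1)
for ponto in P:
    plt.plot(ponto[0],ponto[1],'o')
    plt.annotate(f'({ponto[0]},{ponto[1]})',ponto,fontsize=14)
```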
## Multiple plots and axes
In matplotlib we can use the function `subplot(m,n,p)` to create multiple independent figures and axes, as if each figure were an element of a large "matrix of figures" with `m` rows and `n` columns, while `p` is the index of the figure (this value is at most the product `m x n`). The function works as follows.
- Example 1: suppose you want to create 3 figures and lay them out in a single row. In this case, `m = 1`, `n = 3` and `p` ranges from 1 to 3, since `m x n = 3`.
- Example 2: suppose you want to create 6 figures and lay them out in 2 rows and 3 columns. In this case, `m = 2`, `n = 3` and `p` ranges from 1 to 6, since `m x n = 6`.
- Example 3: suppose you want to create 12 figures and lay them out in 4 rows and 3 columns. In this case, `m = 4`, `n = 3` and `p` ranges from 1 to 12, since `m x n = 12`.
Each plot has its own axes, independent of the others.
**Example 1:** a line, a parabola and a cubic polynomial side by side.
```
x = np.linspace(-5,5,20)
plt.figure(figsize=(15,4))
# aqui p = 1
plt.subplot(1,3,1) # plt.subplot(131) também é válida
plt.plot(x,2*x-1,c='r',marker='^')
plt.title('$y=2x-1$')
# aqui p = 2
plt.subplot(1,3,2) # plt.subplot(132) também é válida
plt.plot(x,3*x**2 - 2*x - 1,c='g',marker='o')
plt.title('$y=3x^2 - 2x - 1$')
# aqui p = 3
plt.subplot(1,3,3) # plt.subplot(133) também é válida
plt.plot(x,1/2*x**3 + 3*x**2 - 2*x - 1,c='b',marker='*')
plt.title('$y=1/2x^3 + 3x^2 - 2x - 1$');
```
**Example 2:** graphs of {$\sin(x)$, $\sin(2x)$, $\sin(3x)$} and {$\cos(x)$, $\cos(2x)$, $\cos(3x)$} arranged in a 2x3 matrix.
```
plt.figure(figsize=(15,4))
plt.subplots_adjust(top=2.5,right=1.2) # ajusta a separação dos plots individuais
def sencosx(p):
x = np.linspace(0,2*np.pi,50)
plt.subplot(2,3,p)
if p <= 3:
plt.plot(x,np.sin(p*x),c=[p/4,p/5,p/6],label=f'$sen({p}x)$')
plt.title(f'subplot(2,3,{p})');
else:
plt.title(f'subplot(2,3,{p})');
p-=3 #
plt.plot(x,np.cos(p*x),c=[p/9,p/7,p/8],label=f'$cos({p}x)$')
plt.legend(loc=0,fontsize=8)
plt.xlabel('x'); plt.ylabel('y');
# plotagem
for p in range(1,7):
sencosx(p)
```
**Example 3:** plots of a single isolated point in a 4 x 3 matrix.
```
plt.figure(figsize=(15,4))
m,n = 4,3
def star(p):
plt.subplot(m,n,p)
plt.axis('off') # desliga eixos
plt.plot(0.5,0.5,marker='*',c=list(np.random.rand(3)),ms=p*2)
plt.annotate(f'subplot({m},{n},{p})',(0.5,0.5),c='g',fontsize=10)
for p in range(1,m*n+1):
star(p);
```
## Plots with grid lines
We can enable the grid using `grid(b,which,axis)`.
To specify the grid:
- turn it on or off for both axes with the boolean `b=True` or `b=False` (not the strings `'True'`/`'False'`, which are both truthy).
- major, minor or both, use `which='major'`, `which='minor'` or `which='both'`.
- on the x axis, the y axis or both, use `axis='x'`, `axis='y'` or `axis='both'`.
```
x = np.linspace(-10,10)
plt.plot(x,x)
plt.grid(True)
plt.plot(x,x)
plt.grid(True,which='major',axis='x')
plt.plot(x,x)
plt.grid(True,which='major',axis='y')
```
**Example:** plotting a custom grid.
In this example, an abstract axes object is added on top of the figure (created directly), with its origin at the point (0.025,0.025), width 0.95 and height 0.95.
```
ax = plt.axes([0.025, 0.025, 0.95, 0.95])
ax.set_xlim(0,4)
ax.set_ylim(0,3)
# MultipleLocator sets the reference spacing used to divide the grid
ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) # major divisions in X
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.2)) # minor divisions in X
ax.yaxis.set_major_locator(plt.MultipleLocator(1.0)) # major divisions in Y
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1)) # minor divisions in Y
# propriedades das linhas
ax.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='x', linewidth=0.5, linestyle=':', color='b')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='y', linewidth=0.5, linestyle=':', color='g')
# to remove the tick labels, uncomment the two lines below
#ax.set_xticklabels([])
#ax.set_yticklabels([]);
plt.plot(x,x,'k')
plt.plot(x,-x+4,'k');
```
## Filled plots
We can use `fill_between` to fill areas under or between curves.
```
x = np.linspace(-np.pi, np.pi, 60)
y = np.sin(2*x)*np.cos(x/2)
plt.fill_between(x,y,alpha=0.5);
x = np.linspace(-np.pi, np.pi, 60)
f1 = np.sin(2*x) - 1
f2 = x**2
plt.plot(x,f1,c='w');
plt.plot(x,f2,c='w');
plt.fill_between(x,f1,f2,color='g',alpha=0.2);
```
## AWS Sentinel2 COG data extraction using STAC
Extract some data into xarray using STAC, visualize it with Holoviz
```
import numpy as np
import xarray as xr
import stackstac
import pystac_client
import hvplot.xarray
import panel as pn
```
Turn off some annoying warnings:
```
import warnings
warnings.filterwarnings("ignore")
import param
param.get_logger().setLevel(param.ERROR)
```
### Create a Dask cluster
To cut down on the execution time when accessing large amounts of data, we can use a Dask cluster to do the computation in parallel.
```
from dask.distributed import Client
client = Client()
client
#client.close();cluster.shutdown()
```
Using `pystac_client` we can search STAC endpoints for items matching our query parameters.
#### Explore what collections exist on a STAC API endpoint
```
stac_api_endpoint = 'https://earth-search.aws.element84.com/v0'
stac = pystac_client.Client.open(stac_api_endpoint)
for collection in stac.get_all_collections():
print(collection)
```
#### Search for data in collections
```
collections=["sentinel-s2-l2a-cogs"]
datetime = "2017-12-01/2018-01-01"
# bbox_lonlat = [40.09, -2.98, 40.61, -2.46] #africa
bbox_lonlat = [151.2957545, -33.7390216, 151.312234, -33.7012561] # AUS, coastsat
cloud_max = 60
search = stac.search(
bbox=bbox_lonlat,
datetime=datetime,
collections=collections,
limit=500, # fetch items in batches of 500
query={"eo:cloud_cover": {"lt": cloud_max}},
)
items = list(search.get_items())
print(len(items))
items_as_dict = [item.to_dict() for item in items]
```
#### Use StackStac to open the items in xarray
```
da = (
stackstac.stack(
items_as_dict,
bounds_latlon = bbox_lonlat,
assets=["B04", "B03", "B02"], # red, green, blue
chunksize=4096,
resolution=10,
)
.where(lambda x: x > 0, other=np.nan) # sentinel-2 uses 0 as nodata
# .assign_coords(band=lambda x: x.common_name.rename("band")) # use common names
)
da
```
### Visualize with Holoviz
Holoviz doesn't like all the extra coordinates, so we drop everything except 'x', 'y', 'time' and 'band', and scale the R, G, B data to the range [0,1].
```
drop_coords = [x for x in list(da.coords) if not x in ['x','y','time','band']]
da = da.drop_vars(drop_coords)
dmean = float(da.mean())
dstd = float(da.std())
vmin = max(dmean - 2*dstd,0)
vmax = dmean + 2*dstd
print(vmax)
da2 = da/vmax
da2.hvplot.rgb(x='x', y='y', bands='band', crs=32756, rasterize=True,
frame_width=200, widgets={'time': pn.widgets.Select})
```
# Difference between two netcdf files
This is a short notebook that will allow you to compare the values between two netcdf files and will produce a netcdf file with the differences as well as a small figure.
## Instructions
1. Upload the 2 data files to the current directory
1. Change global variables to desired values
1. Run all cells
## Set variables
```
import os
# Input files
FILENAME1 = os.path.join(".", "FILENAME_HERE.nc")
FILENAME2 = os.path.join(".", "FILENAME_HERE.nc")
# Outputfile
OUTPUT_FILE = os.path.join(".", "differences.nc")
```
## Necessary imports
```
# Os manipulations mainly path
import os
# NetCDF loading and manip
import xarray as xr
# Adding holoviz support to xarray
import hvplot.xarray
# Basic matplotlib graphing
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from matplotlib.colors import LinearSegmentedColormap
```
## Some custom colormaps
```
fzcmap = LinearSegmentedColormap.from_list("my_colormap", ((0,0.2,0.4),(0,0.2,0.4),'slateblue','deepskyblue','aqua','antiquewhite','yellow','darkorange','red',(0.54,0,0),(0.54,0,0)), N=256, gamma=1.0)
blackcmap = LinearSegmentedColormap.from_list("my_colormap", ('k', 'k'), N=256, gamma=1.0)
landseacmap = LinearSegmentedColormap.from_list("my_colormap", ((0.22,0.22,0.56),(0.38,0.49,0.99),(0.51,0.99,1),(0.01,0.51,0),(0.7,0.7,0)), N=256, gamma=1.0)
bathycmap = LinearSegmentedColormap.from_list("my_colormap", ((0.1601562, 0.28125, 0.703125),(0.28125, 0.484375, 0.859375),(0.8671875, 0.9101562, 0.9804688)), N=256, gamma=1.0)
```
## Load the data
```
ds1 = xr.open_dataset(FILENAME1)
ds2 = xr.open_dataset(FILENAME2)
```
## Quick Data overview
```
ds1
ds2
```
## Simple plotting
```
ds1.TPP_ALLK.hvplot.image(cmap=fzcmap)
ds2.TPP_ALLK.hvplot.image()
```
## Calculate the difference
```
# The TIME_COUNTER coordinates differ between the two datasets; since we only compare a single variable, we drop them before subtracting
difference = (ds1.drop('TIME_COUNTER').TPP_ALLK - ds2.drop('TIME_COUNTER').TPP_ALLK)
difference.hvplot()
```
## Write the data to disk
```
difference.to_netcdf(OUTPUT_FILE)
```
## Make a standard plot
```
plt.figure(figsize=(6, 4.5))
ax = plt.axes(projection=ccrs.Robinson())
# ax.gridlines()
cf = difference.plot(transform=ccrs.PlateCarree(), ax=ax, cmap=fzcmap, add_colorbar=False);
# ax.set(aspect=2)
# ax.set_aspect('auto')
ax.tick_params(labelsize=15)
# ax.set_title('Plot Anomaly', weight='normal', fontsize=15)
# plt.title('Plot Anomaly', weight='normal', fontsize=15);
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
linewidth=.5, color='k', alpha=1., linestyle='-', zorder=999)
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlines = True
gl.xlocator = mticker.FixedLocator([-180, -120, -60, 0, 60, 120, 180])
gl.xformatter = LONGITUDE_FORMATTER
gl.ylocator = mticker.FixedLocator([-90, -60, -30, 0, 30, 60, 90])
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 15, 'color': 'gray'}
gl.xlabel_style = {'color': 'red', 'weight': 'bold'}
cb = plt.colorbar(cf, orientation='horizontal',extend='both')
cf.cmap.set_under('darkblue')
cf.cmap.set_over('sienna')
cb.ax.tick_params(labelsize=15)
cb.ax.set_title('Prod Anomaly', weight='normal', fontsize=15)
ax.outline_patch.set_linewidth(1.6)
ax.outline_patch.set_zorder(3000)
# plt.tight_layout()
# plt.savefig(fname,format='png', dpi=450)
# ax.outline_patch.set_linewidth(1.6)
# ax.outline_patch.set_zorder(3000)
plt.savefig('anomaly.tif', dpi=450)
```
```
from __future__ import unicode_literals, print_function
import plac
import random
from pathlib import Path
import pandas as pd
import numpy as np
import math
import thinc.extra.datasets
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import spacy
from spacy.util import minibatch, compounding, decaying
n_iter = 20
def get_data(validation_ratio=0.1):
df_trn = pd.read_csv('germeval2018.training.txt', sep='\t', header=None, names=['text', 'bin', 'detail']).drop('detail', axis=1)
idx = np.arange(len(df_trn))
np.random.shuffle(idx)
val_size = math.ceil(len(df_trn) * validation_ratio)
val_df = df_trn.iloc[idx[:val_size]]
trn_df = df_trn.iloc[idx[val_size:]]
trn_labels = [{'OFFENSE': x == 'OFFENSE'} for x in trn_df['bin'].values]
val_labels = [{'OFFENSE': x == 'OFFENSE'} for x in val_df['bin'].values]
return (tuple(trn_df['text'].values), trn_labels), (tuple(val_df['text'].values), val_labels)
def evaluate(tokenizer, textcat, texts, cats):
docs = (tokenizer(text) for text in texts)
tp = 1e-8 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
tn = 1e-8 # True negatives
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
if label not in gold:
continue
if score >= 0.5 and gold[label] >= 0.5:
tp += 1.
elif score >= 0.5 and gold[label] < 0.5:
fp += 1.
elif score < 0.5 and gold[label] < 0.5:
tn += 1
elif score < 0.5 and gold[label] >= 0.5:
fn += 1
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f_score = 2 * (precision * recall) / (precision + recall)
return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}
(train_texts, train_cats), (dev_texts, dev_cats) = get_data()
#print(f'text: {train_texts[:2]}')
#print(f'cats: {train_cats[:2]}')
#print(f'cats: {dev_cats[:2]}')
print("Using examples ({} training, {} evaluation)".format(len(train_texts), len(dev_texts)))
train_data = list(zip(train_texts, [{'cats': cats} for cats in train_cats]))
train_data[3:4]
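# Custom spaCy pipeline component: merges tokens that start with '@' (Twitter handles)
# into single tokens tagged as proper nouns (POS id 95 = PROPN)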
class TwitterHandleMatcher(object):
def __init__(self, nlp):
self.pos_value = 95 # PROPN
def __call__(self, doc):
for i, t in enumerate(doc):
if t.text.startswith('@'):
span = doc[i:i+1]
span.merge(pos=self.pos_value)
return doc
#nlp = spacy.load('de_core_news_md')
nlp = spacy.load('/Users/michel/innoq/machinelearning/germeval2018/twitter_vec_200')
DEFAULT_PIPES = ['tagger', 'parser', 'ner']
for n in DEFAULT_PIPES:
p = nlp.create_pipe(n)
nlp.add_pipe(p, last=True)
print(f'loaded model {nlp.lang}')
twitter = TwitterHandleMatcher(nlp)
nlp.add_pipe(twitter, after='tagger')
if 'textcat' not in nlp.pipe_names:
textcat = nlp.create_pipe('textcat')
nlp.add_pipe(textcat, last=True)
else:
textcat = nlp.get_pipe('textcat')
textcat.add_label('OFFENSE')
print(f'pipeline {nlp.pipe_names}')
#doc = nlp(train_texts[3])
%xmode Verbose
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
with nlp.disable_pipes(*other_pipes): # only train textcat
optimizer = nlp.begin_training()
optimizer.max_grad_norm = 0.6
print("Training the model...")
print('\t{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
best_f = 0
last_f = 0
n_iter_nogain = 0
dropout = decaying(0.45, 0.2, 1e-4)
for i in range(n_iter):
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(train_data, size=compounding(1, 8, 1.01))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=next(dropout), losses=losses)
with textcat.model.use_params(optimizer.averages):
# evaluate on the dev data split off in load_data()
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
if scores['textcat_f'] > best_f:
best_f = scores['textcat_f']
n_iter_nogain = 0
elif scores['textcat_f'] > last_f:
n_iter_nogain = 0
else:
n_iter_nogain += 1
last_f = scores['textcat_f']
print('{4}\t{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}' # print a simple table
.format(losses['textcat'], scores['textcat_p'],
scores['textcat_r'], scores['textcat_f'], i))
if n_iter_nogain > 3:
print('early stopping')
break
```
# Grove Temperature Sensor 1.2
This example shows how to use the [Grove Temperature Sensor v1.2](http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor_V1.2) on the Pynq-Z1 board. You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC.
A [Grove Temperature sensor](http://www.seeedstudio.com/depot/grove-led-bar-p-1178.html) together with a Pynq Grove Adapter or Pynq Shield is required. In this example, the Grove Temperature Sensor, the Pynq Grove Adapter, and the Grove I2C ADC are used.
You can read a single value of temperature or read multiple values at regular intervals for a desired duration.
At the end of this notebook, a Python only solution with single-sample read functionality is provided.
### 1. Load overlay
```
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
```
### 2. Read single temperature
This example shows how to get a single temperature sample from the Grove TMP sensor.
The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged into the 1st PMOD port, labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC.
Grove ADC provides a raw sample which is converted into resistance first and then converted into temperature.
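For reference, a minimal sketch of that conversion (it reuses the B-parameter thermistor equation and the v1.2 B value of 4250 that appear in the pure-Python class at the end of this notebook, and assumes `val` is the raw 12-bit ADC reading):
```
from math import log

def adc_to_celsius(val, b_value=4250):
    # convert the raw 12-bit ADC reading into a resistance ratio, then apply
    # the B-parameter equation referenced to 25 degrees Celsius (298.15 K)
    R = 4095.0/val - 1.0
    return 1.0/(log(R)/b_value + 1/298.15) - 273.15
```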
```
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
#print(tmp.reset())
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
```
### 3. Start logging once every 100ms for 10 seconds
Executing the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touch/hold the temperature sensor to vary the measured temperature.
You can vary the logging interval and the duration by changing the values 100 and 10 in the cell below. The raw samples are stored in the internal memory, and converted into temperature values.
```
import time
%matplotlib inline
import matplotlib.pyplot as plt
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
```
### 4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IIC
This class is ported from http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor.
```
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
"""This class controls the grove temperature sensor.
This class inherits from the PMODIIC class.
Attributes
----------
iop : _IOP
The _IOP object returned from the DevMode.
scl_pin : int
The SCL pin number.
sda_pin : int
The SDA pin number.
iic_addr : int
The IIC device address.
"""
def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
"""Return a new instance of a grove OLED object.
Parameters
----------
pmod_id : int
The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
gr_pins: list
The group pins on Grove Adapter. G3 or G4 is valid.
model : string
Temperature sensor model (can be found on the device).
"""
if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
[scl_pin,sda_pin] = gr_pins
else:
raise ValueError("Valid group numbers are G3 and G4.")
# Each revision has its own B value
if model == 'v1.2':
# v1.2 uses thermistor NCP18WF104F03RC
self.bValue = 4250
elif model == 'v1.1':
# v1.1 uses thermistor NCP18WF104F03RC
self.bValue = 4250
else:
# v1.0 uses thermistor TTC3A103*39H
self.bValue = 3975
super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
# Initialize the Grove ADC
self.send([0x2,0x20]);
def read(self):
"""Read temperature in Celsius from grove temperature sensor.
Parameters
----------
None
Returns
-------
float
Temperature reading in Celsius.
"""
val = self._read_grove_adc()
R = 4095.0/val - 1.0
temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
return temp
def _read_grove_adc(self):
self.send([0])
bytes = self.receive(2)
return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
```
## The 2D diffusion equation on GPUs, in minutes
This notebook implements, for a given initial density profile, a solver for the 2D diffusion equation using an explicit finite difference scheme with 'do-nothing' conditions on the boundaries (and hence will not provide a reasonable solution once the profile has diffused to a boundary).
```
# Some imports we will need below
import numpy as np
from devito import *
import matplotlib.pyplot as plt
%matplotlib inline
```
### Solver implementation
We start by creating a Cartesian `Grid` representing the computational domain:
```
nx, ny = 100, 100
grid = Grid(shape=(nx, ny))
```
To represent the density, we use a `TimeFunction` -- a scalar, discrete function encapsulating space- and time-varying data. We also use a `Constant` for the diffusion coefficient.
```
u = TimeFunction(name='u', grid=grid, space_order=2, save=200)
c = Constant(name='c')
```
The 2D diffusion equation is expressed as:
```
eqn = Eq(u.dt, c * u.laplace)
```
From this diffusion equation we derive our time-marching method -- at each timestep, we compute `u` at timestep `t+1`, which in the Devito language is represented by `u.forward`. Hence:
```
step = Eq(u.forward, solve(eqn, u.forward))
```
OK, it's time to let Devito generate code for our solver!
```
op = Operator([step])
```
Before executing the `Operator` we must first specify the initial density profile. Here, we place a "ring" with a constant density value in the center of the domain.
```
xx, yy = np.meshgrid(np.linspace(0., 1., nx, dtype=np.float32),
np.linspace(0., 1., ny, dtype=np.float32))
r = (xx - .5)**2. + (yy - .5)**2.
# Inserting the ring
u.data[0, np.logical_and(.05 <= r, r <= .1)] = 1.
```
We're now ready to execute the `Operator`. We run it with a diffusion coefficient of 0.5 and for a carefully chosen `dt`. Unless specified otherwise, the simulation runs for 199 timesteps, as specified in the definition of `u` (the function was defined with `save=200`: the initial data plus 199 new timesteps).
```
stats = op.apply(dt=5e-05, c=0.5)
```
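As a side note on why `dt` has to be chosen carefully: for an explicit scheme like this one the timestep must roughly satisfy `dt <= h**2/(4*c)`, where `h` is the grid spacing. A quick sanity check, assuming the `Grid`'s default unit-box extent (so the spacing is `1/(nx-1)` in each direction):
```
# Stability sanity check for the explicit scheme (assumes the default unit-box extent)
h = 1.0/(nx - 1)
c_val = 0.5
print(h**2/(4*c_val))  # ~5.1e-05, so dt = 5e-05 sits just below the bound
```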
### Initial conditions and snapshots every 40 timesteps
```
plt.rcParams['figure.figsize'] = (20, 20)
for i in range(1, 6):
plt.subplot(1, 6, i)
plt.imshow(u.data[(i-1)*40])
plt.show()
```
### GPU-parallel solver
Let us now generate a GPU implementation of the same solver. It's actually straightforward!
```
# Instead of `platform=nvidiaX`, you may run your Python code with
# the environment variable `DEVITO_PLATFORM=nvidiaX`
# We also need the `gpu-fit` option to tell Devito that `u` will definitely
# fit in the GPU memory. This is necessary every time a TimeFunction with
# `save != None` is used. Otherwise, Devito could generate code such that
# `u` gets streamed between the CPU and the GPU, but for this advanced
# feature you will need `devitopro`.
op = Operator([step], platform='nvidiaX', opt=('advanced', {'gpu-fit': u}))
```
**That's it!** We can now run it exactly as before
```
# Uncomment and run only if Devito was installed with GPU support.
# stats = op.apply(dt=5e-05, c=0.5)
```
We should see a big performance difference between the two runs. We can also inspect `op` to see what Devito has generated to run on the GPU
```
print(op)
```
# Variational Quantum Linear Solver
```
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import Aer, execute
import math
import random
import numpy as np
from scipy.optimize import minimize
```
## 1. Introduction
The Variational Quantum Linear Solver (VQLS) is a variational quantum algorithm that uses VQE-style optimization to solve systems of linear equations more efficiently than a classical computer. Specifically, given a matrix $\textbf{A}$ and a known vector $|\textbf{b}\rangle$ such that $\textbf{A} |\textbf{x}\rangle = |\textbf{b}\rangle$, the VQLS algorithm can, in theory, find a normalized state $|x\rangle$ proportional to the $|\textbf{x}\rangle$ that satisfies this relation.
The output of this algorithm is identical to that of the HHL algorithm. HHL promises a much larger speedup than VQLS, but VQLS can run on NISQ quantum computers, whereas HHL requires quantum hardware with many qubits and sufficient fault tolerance.
## 2. The Algorithm
First, the input to this algorithm is a matrix $\textbf{A}$ that can be decomposed into a linear combination of unitaries with complex coefficients:
$$A \ = \ \displaystyle\sum_{n} c_n \ A_n$$
where each $A_n$ is unitary, and let $U$ be the unitary that maps the state $|0\rangle$ to the state $|\textbf{b}\rangle$. Recall the general structure of a variational quantum algorithm: we must construct a quantum cost function that is evaluated with a low-depth parameterized quantum circuit and whose output is handed to a classical optimizer. This lets us search the parameter space for a set of parameters $\alpha$ such that $|\psi(\alpha)\rangle \ = \ \frac{|\textbf{x}\rangle}{|| \textbf{x} ||}$, where $|\psi(k)\rangle$ is the output of the quantum circuit for a given parameter set $k$.
Before constructing the actual cost function, let's take a bird's-eye view of the subroutines of this algorithm, based on the figure from the original paper.

We start with a qubit register in which every qubit is initialized to $|0\rangle$. The algorithm takes its inputs, prepares and evaluates the cost function, and produces an ansatz state. If the computed cost is greater than some parameter $\gamma$, the algorithm updates the parameters and runs again. Otherwise, it terminates, and the ansatz is evaluated with the optimal parameters. This yields the state vector that minimizes the cost function, and therefore the normalized form of $|\textbf{x}\rangle$.<br>
## 3. Qiskit Implementation
### Fixed Hardware Ansatz
Let's start by considering an ansatz $V(\alpha)$ that prepares some state $|\psi(k)\rangle$, so that by varying the parameters $k$ we can "search" the state space. The ansatz we will use in this implementation is given as follows:
```
def apply_fixed_ansatz(qubits, parameters):
for iz in range (0, len(qubits)):
circ.ry(parameters[0][iz], qubits[iz])
circ.cz(qubits[0], qubits[1])
circ.cz(qubits[2], qubits[0])
for iz in range (0, len(qubits)):
circ.ry(parameters[1][iz], qubits[iz])
circ.cz(qubits[1], qubits[2])
circ.cz(qubits[2], qubits[0])
for iz in range (0, len(qubits)):
circ.ry(parameters[2][iz], qubits[iz])
circ = QuantumCircuit(3)
apply_fixed_ansatz([0, 1, 2], [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
circ.draw()
```
This is called a **fixed hardware ansatz**: the arrangement of quantum gates stays the same for every run of the circuit, and only the parameters change. Unlike the ansatz in QAOA, it is not built from a Trotterized Hamiltonian. The applications of $Ry$ gates allow us to search the state space, while the $CZ$ gates create "interference" between the different qubit states.
Now let's think about the actual **cost function**. The goal of the algorithm is to minimize the cost, so the cost function should output a very small value when $|\Phi\rangle = \textbf{A} |\psi(k)\rangle$ is very close to $|\textbf{b}\rangle$, and a large value when the two vectors are nearly orthogonal. We therefore introduce the projection Hamiltonian
$$H_P \ = \ \mathbb{I} \ - \ |b\rangle \langle b|$$
which has the following property:
$$C_P \ = \ \langle \Phi | H_P | \Phi \rangle \ = \ \langle \Phi | (\mathbb{I} \ - \ |b\rangle \langle b|) |\Phi \rangle \ = \ \langle \Phi | \Phi \rangle \ - \ \langle \Phi |b\rangle \langle b | \Phi \rangle$$
The second term measures how well $|\Phi\rangle$ "matches" $|b\rangle$: it is subtracted from the first term, so when the inner product between $|\Phi\rangle$ and $|b\rangle$ is large (the vectors agree more), we get the desired low value. This already looks quite good, but there is one more way to improve the accuracy of the algorithm. If $|\Phi\rangle$ has a small norm, the cost function can take a small value even when $|\Phi\rangle$ does not agree with $|\textbf{b}\rangle$. We therefore replace $|\Phi\rangle$ with $\frac{|\Phi\rangle}{\sqrt{\langle \Phi | \Phi \rangle}}$, which gives
$$\hat{C}_P \ = \ \frac{\langle \Phi | \Phi \rangle}{\langle \Phi | \Phi \rangle} - \frac{\langle \Phi |b\rangle \langle b | \Phi \rangle}{\langle \Phi | \Phi \rangle} \ = \ 1 \ - \ \frac{\langle \Phi |b\rangle \langle b | \Phi \rangle}{\langle \Phi | \Phi \rangle} \ = \ 1 \ - \ \frac{|\langle b | \Phi \rangle|^2}{\langle \Phi | \Phi \rangle}$$
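As a purely classical sanity check (a numpy sketch, not part of the quantum algorithm; it assumes $A$, $b$ and the candidate state are real-valued arrays and that $b$ is normalized), this normalized cost can be written directly as:
```
# Classical sketch of the normalized cost C_P = 1 - |<b|Phi>|^2 / <Phi|Phi>,
# with Phi = A @ psi (everything assumed real here)
def classical_cost(A, b, psi):
    phi = A @ psi
    return 1 - (b @ phi)**2 / (phi @ phi)
```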
Now that the ansatz prepares $|\psi(k)\rangle$, computing the cost function comes down to finding the two quantities $|\langle b | \Phi \rangle|^2$ and $\langle \Phi | \Phi \rangle$. Luckily, there is a quantum subroutine for computing both of them: the Hadamard test. Essentially, if we have some unitary operator $U$ and a state $|\phi\rangle$ and we want the expectation value $\langle \phi | U | \phi \rangle$ of $U$ in that state, we can estimate it with the following circuit:
<br><br>

<br><br>
The probability of measuring the first qubit in the state $0$ is $\frac{1}{2} (1 \ + \ \text{Re}\langle U \rangle)$, and the probability of measuring $1$ is $\frac{1}{2} (1 \ - \ \text{Re}\langle U \rangle)$, so taking the difference of these two probabilities gives $\text{Re} \langle U \rangle$. Luckily, the matrices we deal with in this particular implementation have only real entries, so $\text{Re} \langle U \rangle \ = \ \langle U \rangle$. Here is how the Hadamard test works. Following the circuit diagram, the general state vector is:
<br>
$$\frac{|0\rangle \ + \ |1\rangle}{\sqrt{2}} \ \otimes \ |\psi\rangle \ = \ \frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ |\psi\rangle}{\sqrt{2}}$$
<br>
Applying the controlled unitary gate, we get:
<br>
$$\frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ |\psi\rangle}{\sqrt{2}} \ \rightarrow \ \frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ U|\psi\rangle}{\sqrt{2}}$$
<br>
We then apply a Hadamard gate to the first qubit:
<br>
$$\frac{|0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ U|\psi\rangle}{\sqrt{2}} \ \rightarrow \ \frac{1}{2} \ \big[ |0\rangle \ \otimes \ |\psi\rangle \ + \ |1\rangle \ \otimes \ |\psi\rangle \ + \ |0\rangle \ \otimes \ U|\psi\rangle \ - \ |1\rangle \ \otimes \ U|\psi\rangle \big]$$
<br>
$$\Rightarrow \ |0\rangle \ \otimes \ (\mathbb{I} \ + \ U)|\psi\rangle \ + \ |1\rangle \ \otimes \ (\mathbb{I} \ - \ U)|\psi\rangle$$
<br>
Now we measure the first qubit. Recall that to find the probability of measuring $0$, we take the inner product of the state with $|0\rangle$ and multiply it by its complex conjugate (if you are not familiar with complex conjugates, check the quantum mechanics chapter). Doing the same for the measurement of $1$, we obtain:
<br>
$$P(0) \ = \ \frac{1}{4} \ \langle \psi | (\mathbb{I} \ + \ U) (\mathbb{I} \ + \ U^{\dagger}) |\psi\rangle \ = \ \frac{1}{4} \ \langle \psi | (\mathbb{I}^2 \ + U \ + \ U^{\dagger} \ + \ U^{\dagger} U) |\psi\rangle \ = \ \frac{1}{4} \ \langle \psi | (2\mathbb{I} \ + U \ + \ U^{\dagger}) |\psi\rangle$$
<br>
$$\Rightarrow \ \frac{1}{4} \Big[ 2 \ + \ \langle \psi | U^{\dagger} | \psi \rangle \ + \ \langle \psi | U | \psi \rangle \Big] \ = \ \frac{1}{4} \Big[ 2 \ + \ (\langle \psi | U | \psi \rangle)^{*} \ + \ \langle \psi | U | \psi \rangle \Big] \ = \ \frac{1}{2} (1 \ + \ \text{Re} \ \langle \psi | U | \psi \rangle)$$
<br>
Following the same procedure, we get:
<br>
$$P(1) \ = \ \frac{1}{2} \ (1 \ - \ \text{Re} \ \langle \psi | U | \psi \rangle)$$
<br>
Then we take the difference:
<br>
$$P(0) \ - \ P(1) \ = \ \text{Re} \ \langle \psi | U | \psi \rangle$$
<br>
Great! Now we can implement this for the two quantities we have to compute. Starting with $\langle \Phi | \Phi \rangle$, we have:
<br>
$$\langle \Phi | \Phi \rangle \ = \ \langle \psi(k) | A^{\dagger} A |\psi(k) \rangle \ = \ \langle 0 | V(k)^{\dagger} A^{\dagger} A V(k) |0\rangle \ = \ \langle 0 | V(k)^{\dagger} \Big( \displaystyle\sum_{n} c_n \ A_n \Big)^{\dagger} \Big( \displaystyle\sum_{n} c_n \ A_n \Big) V(k) |0\rangle$$
<br>
$$\Rightarrow \ \langle \Phi | \Phi \rangle \ = \ \displaystyle\sum_{m} \displaystyle\sum_{n} c_m^{*} c_n \langle 0 | V(k)^{\dagger} A_m^{\dagger} A_n V(k) |0\rangle$$
<br>
The Hadamard test lets us compute any term of the form $\langle 0 | V(k)^{\dagger} A_m^{\dagger} A_n V(k) |0\rangle$: we prepare the state $V(k) |0\rangle$ and apply the unitaries $A_m^{\dagger}$ and $A_n$ as operations controlled by an ancilla qubit. Let's implement this in code.
```
# Construct the Hadamard test
def had_test(gate_type, qubits, ancilla_index, parameters):
circ.h(ancilla_index)
apply_fixed_ansatz(qubits, parameters)
for ie in range (0, len(gate_type[0])):
if (gate_type[0][ie] == 1):
circ.cz(ancilla_index, qubits[ie])
for ie in range (0, len(gate_type[1])):
if (gate_type[1][ie] == 1):
circ.cz(ancilla_index, qubits[ie])
circ.h(ancilla_index)
circ = QuantumCircuit(4)
had_test([[0, 0, 0], [0, 0, 1]], [1, 2, 3], 0, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
circ.draw()
```
The reason we apply two different gate types is that they represent the pairs of gates that appear in the expanded form of $\langle \Phi | \Phi \rangle$.
It is also important to note the scope of this implementation: the system of equations we actually solve only involves the gates $Z$ and $\mathbb{I}$, so only these gates are supported. The code uses numerical "identifiers" for the different gates ($0$ for $\mathbb{I}$ and $1$ for $Z$).
We can now move on to the second quantity we have to compute, $|\langle b | \Phi \rangle|^2$. We have:
<br>
$$|\langle b | \Phi \rangle|^2 \ = \ |\langle b | A V(k) | 0 \rangle|^2 \ = \ |\langle 0 | U^{\dagger} A V(k) | 0 \rangle|^2 \ = \ \langle 0 | U^{\dagger} A V(k) | 0 \rangle \langle 0 | V(k)^{\dagger} A^{\dagger} U |0\rangle$$
<br>
All we have to do now is the same expansion as before, applied to the product $\langle 0 | U^{\dagger} A V(k) | 0 \rangle \langle 0 | V(k)^{\dagger} A^{\dagger} U |0\rangle$:
<br>
$$\langle 0 | U^{\dagger} A V(k) | 0 \rangle^2 \ = \ \displaystyle\sum_{m} \displaystyle\sum_{n} c_m^{*} c_n \langle 0 | U^{\dagger} A_n V(k) | 0 \rangle \langle 0 | V(k)^{\dagger} A_m^{\dagger} U |0\rangle$$
<br>
Again, for the purposes of this demonstration, all of the outputs/expectation values we implement are real, which means that:
<br>
$$\Rightarrow \ \langle 0 | U^{\dagger} A V(k) | 0 \rangle \ = \ (\langle 0 | U^{\dagger} A V(k) | 0 \rangle)^{*} \ = \ \langle 0 | V(k)^{\dagger} A^{\dagger} U |0\rangle$$
<br>
Hence, for this particular implementation:
<br>
$$|\langle b | \Phi \rangle|^2 \ = \ \displaystyle\sum_{m} \displaystyle\sum_{n} c_m c_n \langle 0 | U^{\dagger} A_n V(k) | 0 \rangle \langle 0 | U^{\dagger} A_m V(k) | 0 \rangle$$
<br>
There is a more elegant way to obtain this quantity using a newly proposed method called the Hadamard Overlap Test (see the referenced paper), but in this tutorial we only use the standard Hadamard test, manipulating each of the matrices. Unfortunately, this requires an extra ancilla qubit: essentially, we place controls on each gate of the unitary that prepares $|b\rangle$ and of the unitaries $A_n$, conditioned on the control ancilla.
For the controlled ansatz, we then get something like this:
```
# Construct the controlled ansatz used when computing |<b|psi>|^2 with the Hadamard test
def control_fixed_ansatz(qubits, parameters, ancilla, reg):
for i in range (0, len(qubits)):
circ.cry(parameters[0][i], qiskit.circuit.Qubit(reg, ancilla), qiskit.circuit.Qubit(reg, qubits[i]))
circ.ccx(ancilla, qubits[1], 4)
circ.cz(qubits[0], 4)
circ.ccx(ancilla, qubits[1], 4)
circ.ccx(ancilla, qubits[0], 4)
circ.cz(qubits[2], 4)
circ.ccx(ancilla, qubits[0], 4)
for i in range (0, len(qubits)):
circ.cry(parameters[1][i], qiskit.circuit.Qubit(reg, ancilla), qiskit.circuit.Qubit(reg, qubits[i]))
circ.ccx(ancilla, qubits[2], 4)
circ.cz(qubits[1], 4)
circ.ccx(ancilla, qubits[2], 4)
circ.ccx(ancilla, qubits[0], 4)
circ.cz(qubits[2], 4)
circ.ccx(ancilla, qubits[0], 4)
for i in range (0, len(qubits)):
circ.cry(parameters[2][i], qiskit.circuit.Qubit(reg, ancilla), qiskit.circuit.Qubit(reg, qubits[i]))
q_reg = QuantumRegister(5)
circ = QuantumCircuit(q_reg)
control_fixed_ansatz([1, 2, 3], [[1, 1, 1], [1, 1, 1], [1, 1, 1]], 0, q_reg)
circ.draw()
```
Note the extra qubit `q0_4`: it is an ancilla that lets us build the $CCZ$ gates appearing in this circuit. We also need to build $U$ as a circuit. In our implementation, $U$ is constructed as:
<br>
$$U \ = \ H_1 H_2 H_3$$
<br>
Hence, we can create the following function:
```
def control_b(ancilla, qubits):
for ia in qubits:
circ.ch(ancilla, ia)
circ = QuantumCircuit(4)
control_b(0, [1, 2, 3])
circ.draw()
```
Finally, we construct the new Hadamard test:
```
# Construct the controlled Hadamard test used for the |<b|psi>|^2 terms
def special_had_test(gate_type, qubits, ancilla_index, parameters, reg):
circ.h(ancilla_index)
control_fixed_ansatz(qubits, parameters, ancilla_index, reg)
for ty in range (0, len(gate_type)):
if (gate_type[ty] == 1):
circ.cz(ancilla_index, qubits[ty])
control_b(ancilla_index, qubits)
circ.h(ancilla_index)
q_reg = QuantumRegister(5)
circ = QuantumCircuit(q_reg)
special_had_test([[0, 0, 0], [0, 0, 1]], [1, 2, 3], 0, [[1, 1, 1], [1, 1, 1], [1, 1, 1]], q_reg)
circ.draw()
```
This is a special case in which all of our parameters are set to $1$, and the set of gates $A_n$ is simply `[0, 0, 0]`, which corresponds to the identity acting on all qubits, and `[0, 0, 1]`, which corresponds to a $Z$ gate acting on the third qubit (following our "code notation"). A minimal sketch of this encoding is shown below.
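To make the encoding concrete, here is how these identifiers map onto the `gate_set` variable used later (the intermediate variable names are illustrative only):
```
# Each list encodes one term A_n: entry i selects the gate on qubit i
# (0 -> identity, 1 -> Z), following the "code notation" described above.
identity_on_all_qubits = [0, 0, 0]   # I on every qubit
z_on_third_qubit = [0, 0, 1]         # Z on the third qubit
gate_set = [identity_on_all_qubits, z_on_third_qubit]
```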
We are now ready to compute the final cost function. This simply means taking the products of the expectation outputs of all combinations of the different circuits, multiplying them by the corresponding coefficients, and plugging them into the cost function described earlier!
```
# Implement the overall cost function
def calculate_cost_function(parameters):
global opt
overall_sum_1 = 0
parameters = [parameters[0:3], parameters[3:6], parameters[6:9]]
for i in range(0, len(gate_set)):
for j in range(0, len(gate_set)):
global circ
qctl = QuantumRegister(5)
qc = ClassicalRegister(5)
circ = QuantumCircuit(qctl, qc)
backend = Aer.get_backend('statevector_simulator')
multiply = coefficient_set[i]*coefficient_set[j]
had_test([gate_set[i], gate_set[j]], [1, 2, 3], 0, parameters)
job = execute(circ, backend)
result = job.result()
outputstate = np.real(result.get_statevector(circ, decimals=100))
o = outputstate
m_sum = 0
for l in range (0, len(o)):
if (l%2 == 1):
n = o[l]**2
m_sum+=n
overall_sum_1+=multiply*(1-(2*m_sum))
overall_sum_2 = 0
for i in range(0, len(gate_set)):
for j in range(0, len(gate_set)):
multiply = coefficient_set[i]*coefficient_set[j]
mult = 1
for extra in range(0, 2):
qctl = QuantumRegister(5)
qc = ClassicalRegister(5)
circ = QuantumCircuit(qctl, qc)
backend = Aer.get_backend('statevector_simulator')
if (extra == 0):
special_had_test(gate_set[i], [1, 2, 3], 0, parameters, qctl)
if (extra == 1):
special_had_test(gate_set[j], [1, 2, 3], 0, parameters, qctl)
job = execute(circ, backend)
result = job.result()
outputstate = np.real(result.get_statevector(circ, decimals=100))
o = outputstate
m_sum = 0
for l in range (0, len(o)):
if (l%2 == 1):
n = o[l]**2
m_sum+=n
mult = mult*(1-(2*m_sum))
overall_sum_2+=multiply*mult
print(1-float(overall_sum_2/overall_sum_1))
return 1-float(overall_sum_2/overall_sum_1)
```
This code may look long and intimidating, but it isn't! Here we take a numerical approach: we compute the squared amplitude of each basis state in which the ancilla (Hadamard-test) qubit is in the $|1\rangle$ state, and from this information compute $P(0) \ - \ P(1) \ = \ 1 \ - \ 2P(1)$. This is exact, but not realistic: on a real quantum device we would have to sample the circuit many times to estimate these probabilities (we discuss sampling later). In addition, this code is not optimized (it evaluates more circuits than necessary), but it is the simplest way to implement the algorithm, and we plan to update this tutorial with an optimized version in the near future.
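As a minimal sketch of this step (assuming, as in the code above, Qiskit's statevector ordering with the ancilla on qubit 0 and real amplitudes; the function name is illustrative), the quantity $P(0) - P(1) = 1 - 2P(1)$ can be extracted from a statevector like this:
```
import numpy as np

def ancilla_expectation(statevector):
    # P(1) on the ancilla (qubit 0) is the total squared amplitude of every
    # basis state whose least-significant bit is 1, i.e. every odd index.
    amplitudes = np.real(statevector)
    p1 = sum(amplitudes[l] ** 2 for l in range(len(amplitudes)) if l % 2 == 1)
    return 1 - 2 * p1  # P(0) - P(1)
```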
The final step is to use this code to actually solve a system of linear equations. Let's look at a first example:
<br>
$$A \ = \ 0.45 Z_3 \ + \ 0.55 \mathbb{I}$$
<br>
We iteratively minimize the cost function with the COBYLA optimizer. Our search space for the parameters is initialized randomly with values of the form $\frac{k}{1000}$, where $k \in \{0, \dots, 3000\}$. We run the optimizer for 200 steps, then stop and apply the ansatz with the optimal parameters. In addition, to check that the algorithm actually works, we do some post-processing: we apply $A$ to the optimal vector $|\psi\rangle_o$, normalize the result, and compute the squared inner product of this vector with the solution vector $|b\rangle$. Writing all of this down in code gives the following:
```
coefficient_set = [0.55, 0.45]
gate_set = [[0, 0, 0], [0, 0, 1]]
out = minimize(calculate_cost_function, x0=[float(random.randint(0,3000))/1000 for i in range(0, 9)], method="COBYLA", options={'maxiter':200})
print(out)
out_f = [out['x'][0:3], out['x'][3:6], out['x'][6:9]]
circ = QuantumCircuit(3, 3)
apply_fixed_ansatz([0, 1, 2], out_f)
backend = Aer.get_backend('statevector_simulator')
job = execute(circ, backend)
result = job.result()
o = result.get_statevector(circ, decimals=10)
a1 = coefficient_set[1]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,-1,0,0,0], [0,0,0,0,0,-1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]])
a2 = coefficient_set[0]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,1,0], [0,0,0,0,0,0,0,1]])
a3 = np.add(a1, a2)
b = np.array([float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8))])
print((b.dot(a3.dot(o)/(np.linalg.norm(a3.dot(o)))))**2)
```
As you can see, the cost function reaches a very low value of `0.03273673575407443`, and when we run the classical calculation above, we get `0.96776862579723`, consistent with what we measured: the vectors $|\psi\rangle_o$ and $|b\rangle$ are very similar.
Let's try another test. This time we keep $|b\rangle$ the same, but set:
<br>
$$A \ = \ 0.55 \mathbb{I} \ + \ 0.225 Z_2 \ + \ 0.225 Z_3$$
Again, we run the optimization code:
```
coefficient_set = [0.55, 0.225, 0.225]
gate_set = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
out = minimize(calculate_cost_function, x0=[float(random.randint(0,3000))/1000 for i in range(0, 9)], method="COBYLA", options={'maxiter':200})
print(out)
out_f = [out['x'][0:3], out['x'][3:6], out['x'][6:9]]
circ = QuantumCircuit(3, 3)
apply_fixed_ansatz([0, 1, 2], out_f)
backend = Aer.get_backend('statevector_simulator')
job = execute(circ, backend)
result = job.result()
o = result.get_statevector(circ, decimals=10)
a1 = coefficient_set[2]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,-1,0,0,0], [0,0,0,0,0,-1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]])
a0 = coefficient_set[1]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,-1,0,0,0,0,0], [0,0,0,-1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]])
a2 = coefficient_set[0]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,1,0], [0,0,0,0,0,0,0,1]])
a3 = np.add(np.add(a2, a0), a1)
b = np.array([float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8))])
print((b.dot(a3.dot(o)/(np.linalg.norm(a3.dot(o)))))**2)
```
Again, the error is very small, `0.00014718223342624626`, and the classical calculation gives `0.9998563418983931`. It works!
We now know that this algorithm works **in theory**. Next, let's run some simulations in which we sample the circuit instead of computing the probabilities numerically; in other words, let's **sample** the quantum circuit the way a real quantum computer would! For a few reasons, this particular simulation converges only reasonably well, even for an enormous number of shots (runs of the circuit used to estimate the outcome probability distribution). This is thought to be mostly due to the limitations of the classical optimizer (COBYLA), given the noisy nature of sampling a quantum circuit (measurements with the same parameters will not always give the same result). Fortunately, there are other optimizers built for noisy functions, such as SPSA, but we won't go into that in this tutorial. Let's try sampling with the second choice of $A$ and the same matrix $U$:
```
#Implements the entire cost function on the quantum circuit (sampling, 100000 shots)
def calculate_cost_function(parameters):
global opt
overall_sum_1 = 0
parameters = [parameters[0:3], parameters[3:6], parameters[6:9]]
for i in range(0, len(gate_set)):
for j in range(0, len(gate_set)):
global circ
qctl = QuantumRegister(5)
qc = ClassicalRegister(1)
circ = QuantumCircuit(qctl, qc)
backend = Aer.get_backend('qasm_simulator')
multiply = coefficient_set[i]*coefficient_set[j]
had_test([gate_set[i], gate_set[j]], [1, 2, 3], 0, parameters)
circ.measure(0, 0)
job = execute(circ, backend, shots=100000)
result = job.result()
outputstate = result.get_counts(circ)
if ('1' in outputstate.keys()):
m_sum = float(outputstate["1"])/100000
else:
m_sum = 0
overall_sum_1+=multiply*(1-2*m_sum)
overall_sum_2 = 0
for i in range(0, len(gate_set)):
for j in range(0, len(gate_set)):
multiply = coefficient_set[i]*coefficient_set[j]
mult = 1
for extra in range(0, 2):
qctl = QuantumRegister(5)
qc = ClassicalRegister(1)
circ = QuantumCircuit(qctl, qc)
backend = Aer.get_backend('qasm_simulator')
if (extra == 0):
special_had_test(gate_set[i], [1, 2, 3], 0, parameters, qctl)
if (extra == 1):
special_had_test(gate_set[j], [1, 2, 3], 0, parameters, qctl)
circ.measure(0, 0)
job = execute(circ, backend, shots=100000)
result = job.result()
outputstate = result.get_counts(circ)
if ('1' in outputstate.keys()):
m_sum = float(outputstate["1"])/100000
else:
m_sum = 0
mult = mult*(1-2*m_sum)
overall_sum_2+=multiply*mult
print(1-float(overall_sum_2/overall_sum_1))
return 1-float(overall_sum_2/overall_sum_1)
coefficient_set = [0.55, 0.225, 0.225]
gate_set = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
out = minimize(calculate_cost_function, x0=[float(random.randint(0,3000))/1000 for i in range(0, 9)], method="COBYLA", options={'maxiter':200})
print(out)
out_f = [out['x'][0:3], out['x'][3:6], out['x'][6:9]]
circ = QuantumCircuit(3, 3)
apply_fixed_ansatz([0, 1, 2], out_f)
backend = Aer.get_backend('statevector_simulator')
job = execute(circ, backend)
result = job.result()
o = result.get_statevector(circ, decimals=10)
a1 = coefficient_set[2]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,-1,0,0,0], [0,0,0,0,0,-1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]])
a0 = coefficient_set[1]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,-1,0,0,0,0,0], [0,0,0,-1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,-1,0], [0,0,0,0,0,0,0,-1]])
a2 = coefficient_set[0]*np.array([[1,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0], [0,0,1,0,0,0,0,0], [0,0,0,1,0,0,0,0], [0,0,0,0,1,0,0,0], [0,0,0,0,0,1,0,0], [0,0,0,0,0,0,1,0], [0,0,0,0,0,0,0,1]])
a3 = np.add(np.add(a2, a0), a1)
b = np.array([float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8)),float(1/np.sqrt(8))])
print((b.dot(a3.dot(o)/(np.linalg.norm(a3.dot(o)))))**2)
```
As you can see, and not surprisingly, the solution is still off by a fair margin (a $3.677\%$ error is not terrible, but ideally we would like to get **much** closer to zero). Moreover, this is likely not the fault of the quantum circuit itself but of the optimizer. We will update this notebook once we work out how to correct this issue (for example, by introducing a noise-aware optimizer, as mentioned above).
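For reference, here is a minimal hand-rolled SPSA sketch (not part of the original implementation; all hyperparameter values are illustrative assumptions) that could stand in for COBYLA on the sampled, noisy cost function:
```
import numpy as np

def spsa_minimize(cost, x0, n_iter=200, a=0.2, c=0.1, alpha=0.602, gamma=0.101):
    # Simultaneous Perturbation Stochastic Approximation: estimates the gradient
    # from just two (noisy) cost evaluations per iteration.
    x = np.array(x0, dtype=float)
    for k in range(1, n_iter + 1):
        ak = a / k ** alpha                                  # step-size schedule
        ck = c / k ** gamma                                  # perturbation-size schedule
        delta = np.random.choice([-1.0, 1.0], size=x.shape)  # random +/-1 directions
        grad_est = (cost(x + ck * delta) - cost(x - ck * delta)) / (2 * ck * delta)
        x = x - ak * grad_est
    return x

# Hypothetical usage with the sampled cost function defined above:
# best_params = spsa_minimize(calculate_cost_function,
#                             [random.randint(0, 3000) / 1000 for _ in range(9)])
```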
## 4. Acknowledgements
This implementation is based on the work described in the [research paper "Variational Quantum Linear Solver: A Hybrid Algorithm for Linear Systems" by Carlos Bravo-Prieto, Ryan LaRose, M. Cerezo, Yiğit Subaşı, Lukasz Cincio, and Patrick J. Coles](https://arxiv.org/abs/1909.05820).
Special thanks to Carlos Bravo-Prieto for answering my questions about the paper.
```
import qiskit
qiskit.__qiskit_version__
```
```
from __future__ import division
from collections import defaultdict
import pickle
import os
import sys
import copy
import random
import json
import pygraphviz as pgv
import numpy as np
import pandas as pd
import xml.etree.ElementTree
from lentil import datatools
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline
import matplotlib as mpl
mpl.rc('savefig', dpi=300)
mpl.rc('text', usetex=True)
```
Download the Mnemosyne log data from [here](https://archive.org/details/20140127MnemosynelogsAll.db) and add the decompressed contents to the `data` directory. I would recommend creating a reasonably-sized random sample of logs from the full db before loading the data into this notebook, since there are ~120 million logs in total. You can use the following commands:
```
sqlite3 2014-01-27-mnemosynelogs-all.db
.mode csv
.headers on
.output mnemosynelogs_mini.csv
select * from log order by Random() limit 10000000;
```
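If you prefer to do the sampling from Python instead of the sqlite3 shell, a rough equivalent is sketched below (the database path is an assumption based on the download above, and very large samples may need chunked reads):
```
import sqlite3
import pandas as pd

conn = sqlite3.connect('data/2014-01-27-mnemosynelogs-all.db')
# Mirrors the sqlite3 commands above: draw a random sample of rows from the log table
sample = pd.read_sql_query('SELECT * FROM log ORDER BY Random() LIMIT 10000000', conn)
sample.to_csv('data/mnemosynelogs_mini.csv', index=False)
conn.close()
```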
Check how many logs have public items
```
public_itemids = defaultdict(set)
fs = [x for x in os.listdir(os.path.join('data', 'shared_decks')) if '.xml' in x]
for f in fs:
try:
e = xml.etree.ElementTree.parse(os.path.join('data', 'shared_decks', f)).getroot()
for x in e.findall('log'):
public_itemids[x.get('o_id')].add(f)
except:
continue
len(public_itemids)
num_logs_of_file = defaultdict(int)
num_logs_of_item = defaultdict(int)
logged_itemids = set()
num_public_logs = 0
with open(os.path.join('data', 'mnemosynelogs_itemids_full.csv'), 'rb') as f:
f.readline()
for line in f:
line = line.replace('\r\n', '')
if line != '':
if line in public_itemids:
num_public_logs += 1
num_logs_of_item[line] += 1
for f in public_itemids[line]:
num_logs_of_file[f] += 1
logged_itemids.add(line)
num_public_logs
len(logged_itemids)
sum(1 for x in public_itemids if x in logged_itemids)
sorted(num_logs_of_item.items(), key=lambda (k,v): v, reverse=True)[:500]
sorted(num_logs_of_file.items(), key=lambda (k,v): v, reverse=True)[:50]
def contents_of_items_in_file(f):
e = xml.etree.ElementTree.parse(os.path.join('data', 'shared_decks', f)).getroot()
D = {}
M = {}
for x in e.findall('log'):
if x.get('type') == '16':
b = x.find('b')
if b is None:
b = x.find('m_1')
f = x.find('f')
if b is not None or f is not None:
D[x.get('o_id')] = (b.text if b is not None else None, f.text if f is not None else None)
elif x.get('type') == '6':
M[x.get('o_id')] = x.get('fact')
return {k: D[v] for k, v in M.iteritems()}
contents_of_item_id = {}
for f in os.listdir(os.path.join('data', 'shared_decks')):
if '.xml' in f:
try:
contents_of_item_id.update(contents_of_items_in_file(f))
except:
pass
len(contents_of_item_id)
contents_of_item_id
with open(os.path.join('data', 'content_features.pkl'), 'wb') as f:
pickle.dump(contents_of_item_id, f, pickle.HIGHEST_PROTOCOL)
```
Filter logs for public items
```
with open(os.path.join('data', 'mnemosynelogs_full.csv'), 'rb') as f:
with open(os.path.join('data', 'mnemosynelogs_full_filtered.csv'), 'wb') as g:
g.write(f.readline())
for line in f:
fields = line.split(',')
if fields[4] != '' and fields[3] in contents_of_item_id:
g.write(line)
```
Make the data set manageably smaller by filtering out users with short/long review histories
```
unfiltered_logs = pd.read_table(os.path.join('data', 'mnemosynelogs_full_filtered.csv'), delimiter=',')
num_ixns_of_user = unfiltered_logs['user_id'].value_counts()
user_ids = unfiltered_logs['user_id'].unique()
mn = 10
mx = 50000
len(user_ids), sum(1 for x in user_ids if num_ixns_of_user[x] > mn and num_ixns_of_user[x] < mx), sum(num_ixns_of_user[x] for x in user_ids if num_ixns_of_user[x] > mn and num_ixns_of_user[x] < mx)
user_ids = {x for x in user_ids if num_ixns_of_user[x] > mn and num_ixns_of_user[x] < mx}
filtered_logs = unfiltered_logs[unfiltered_logs['user_id'].isin(user_ids)]
filtered_logs.to_csv(os.path.join('data', 'mnemosynelogs_full_filtered_pruned.csv'), index=False)
```
Load the filtered logs and compute basic stats summarizing the data set
```
df = pd.read_csv(os.path.join('data', 'mnemosynelogs_full_filtered_pruned.csv'), delimiter=',')
print '\n'.join(df.columns)
len(df[~np.isnan(df['grade'])])
print "Number of interactions = %d" % len(df)
print "Number of unique students = %d" % len(df['user_id'].unique())
print "Number of unique modules = %d" % len(df['object_id'].unique())
av = np.array(df['actual_interval'].values)
sv = np.array(df['scheduled_interval'].values)
av, sv = zip(*[(x, y) for x, y in zip(av, sv) if x>0 and y>0 and not np.isnan(x) and not np.isnan(y)])
av = np.array(av)
sv = np.array(sv)
plt.xlabel('log10(Scheduled interval) (log10-milliseconds)')
plt.ylabel('Frequency (number of interactions)')
plt.hist(np.log10(sv+1), bins=20)
plt.show()
plt.xlabel('log10(Scheduled interval) (log10-milliseconds)')
plt.ylabel('log10(Actual interval) (log10-milliseconds)')
plt.scatter(np.log10(sv+1), np.log10(av+1), alpha=0.005)
#plt.savefig(os.path.join('figures', 'mnemosyne', 'scheduled-vs-actual-intervals.pdf'))
plt.show()
v = np.array(df['user_id'].value_counts().values)
plt.xlabel('log10(Number of interactions per student)')
plt.ylabel('Frequency (number of students)')
plt.hist(np.log10(v))
plt.show()
v = np.array(df['object_id'].value_counts().values)
plt.xlabel('log10(Number of interactions per problem)')
plt.ylabel('Frequency (number of problems)')
plt.hist(np.log10(v))
plt.show()
grades = np.array(df['grade'].values)
plt.xlabel('Grade')
plt.ylabel('Frequency (number of interactions)')
plt.hist(grades[~np.isnan(grades)])
plt.show()
```
Apply more filters and format the log data into an `InteractionHistory` that can be understood by [lentil](https://github.com/rddy/lentil)
```
def interaction_history_from_mnemosyne_data_set(data):
"""
Parse Mnemosyne data set into an interaction history
:param pd.DataFrame data: A dataframe of raw log data
:rtype: datatools.InteractionHistory
:return: An interaction history object
"""
data = data[data['grade'].apply(lambda x: not np.isnan(x))]
data = data[['user_id', 'student_id', 'object_id', 'grade', 'timestamp', 'thinking_time', 'actual_interval', 'scheduled_interval']]
data.columns = ['user_id', 'student_id', 'module_id', 'outcome', 'timestamp', 'duration', 'actual_interval', 'scheduled_interval']
data['outcome'] = data['outcome'].apply(lambda x: x > 1)
student_timesteps = defaultdict(int)
timesteps = [None] * len(data)
for i, (_, ixn) in enumerate(data.iterrows()):
student_timesteps[ixn['student_id']] += 1
timesteps[i] = student_timesteps[ixn['student_id']]
data['timestep'] = timesteps
data['module_type'] = [datatools.AssessmentInteraction.MODULETYPE] * len(data)
return datatools.InteractionHistory(data, sort_by_timestep=True)
df.sort('timestamp', inplace=True)
# this is helpful for splitting histories by user-item pair (instead of by user) in lentil.evaluate
df['student_id'] = [str(x['user_id'])+'-'+str(x['object_id']) for _, x in df.iterrows()]
unfiltered_history = interaction_history_from_mnemosyne_data_set(df)
unfiltered_history.data['outcome'].value_counts()
```
Perform analogous preprocessing steps for the MTurk data set
```
data = []
with open(os.path.join('data', 'first_mturk_experiment.dataset'), 'rb') as f:
for line in f:
data.append(json.loads(line))
df = pd.DataFrame(data)
df['delta_t'] = df['delta_t'] * 4 * 60 * 60 # seconds
num_ixns_per_user_item = {k: defaultdict(list) for k in df['user'].unique()}
for _, ixn in df.iterrows():
num_ixns_per_user_item[ixn['user']][ixn['item']].append(ixn['delta_t'])
start_time_of_user_item = {}
for user, num_ixns_per_item in num_ixns_per_user_item.iteritems():
start_time = 0
for item, delta_ts in num_ixns_per_item.iteritems():
start_time_of_user_item[(user, item)] = start_time
start_time += sum(delta_ts)
df.sort('n_reps', inplace=True)
timestamps = []
for _, ixn in df.iterrows():
user_item = (ixn['user'], ixn['item'])
start_time_of_user_item[user_item] += ixn['delta_t']
timestamps.append(start_time_of_user_item[user_item])
df['timestamp'] = timestamps
df.sort('timestamp', inplace=True)
def interaction_history_from_mturk_data_set(data):
"""
Parse MTurk data set into an interaction history
:param pd.DataFrame data: A dataframe of raw log data
:rtype: datatools.InteractionHistory
:return: An interaction history object
"""
data = data[['user', 'user', 'item', 'bin_score', 'timestamp']]
data.columns = ['user_id', 'student_id', 'module_id', 'outcome', 'timestamp']
data['outcome'] = data['outcome'].apply(lambda x: x == 1)
student_timesteps = defaultdict(int)
timesteps = [None] * len(data)
for i, (_, ixn) in enumerate(data.iterrows()):
student_timesteps[ixn['student_id']] += 1
timesteps[i] = student_timesteps[ixn['student_id']]
data['timestep'] = timesteps
data['module_type'] = [datatools.AssessmentInteraction.MODULETYPE] * len(data)
return datatools.InteractionHistory(data, sort_by_timestep=True)
unfiltered_history = interaction_history_from_mturk_data_set(df)
```
Pre-process the `dutch_big` dataset
```
data = []
with open(os.path.join('data', 'dutch_big.dump'), 'rb') as f:
for line in f:
data.append((line.split('\t')[0], json.loads(line.split('\t')[1])))
original_of_module_id = {}
for _, h in data:
for x in h:
original_of_module_id[x['foreign']] = x['original']
with open(os.path.join('data', 'original_of_module_id.pkl'), 'wb') as f:
pickle.dump(original_of_module_id, f, pickle.HIGHEST_PROTOCOL)
ixns = []
timestamp_of_student = defaultdict(int)
for student_id, h in data:
for ixn in h:
timestamp_of_student[student_id] += 1
ixns.append(
{'student_id' : student_id, 'module_id' : ixn['foreign'],
'outcome' : ixn['score'] > 2, 'timestamp' : timestamp_of_student[student_id]})
df = pd.DataFrame(ixns)
df['user_id'] = df['student_id']
df['student_id'] = df['user_id'] + '-' + df['module_id']
len(df)
df.sort('timestamp', inplace=True)
def interaction_history_from_dutch_big_data_set(data):
"""
Parse the dutch_big data set into an interaction history
:param pd.DataFrame data: A dataframe of raw log data
:rtype: datatools.InteractionHistory
:return: An interaction history object
"""
data = data[['user_id', 'student_id', 'module_id', 'outcome', 'timestamp']]
data.columns = ['user_id', 'student_id', 'module_id', 'outcome', 'timestamp']
student_timesteps = defaultdict(int)
timesteps = [None] * len(data)
for i, (_, ixn) in enumerate(data.iterrows()):
student_timesteps[ixn['student_id']] += 1
timesteps[i] = student_timesteps[ixn['student_id']]
data['timestep'] = timesteps
data['module_type'] = [datatools.AssessmentInteraction.MODULETYPE] * len(data)
return datatools.InteractionHistory(data, sort_by_timestep=True)
unfiltered_history = interaction_history_from_dutch_big_data_set(df)
```
Apply additional data filters
```
def filter_history(history, min_num_ixns=5, max_num_ixns=sys.maxint):
"""
Filter history for students with histories of bounded length,
and modules with enough interactions
:param datatools.InteractionHistory history: An interaction history
:param int min_num_ixns: Minimum number of timesteps in student history,
and minimum number of interactions for module
:param int max_num_ixns: Maximum number of timesteps in student history
:rtype: datatools.InteractionHistory
:return: A filtered interaction history
"""
students = set(history.data['student_id'][(
history.data['timestep'] > min_num_ixns) & (
history.data['module_type']==datatools.AssessmentInteraction.MODULETYPE)])
students -= set(history.data['student_id'][history.data['timestep'] >= max_num_ixns])
modules = {module_id for module_id, group in history.data.groupby('module_id') if len(group) > min_num_ixns}
return datatools.InteractionHistory(
history.data[(history.data['student_id'].isin(students)) & (
history.data['module_id'].isin(modules))],
reindex_timesteps=True,
size_of_test_set=0.2)
# apply the filter a couple of times, since removing student histories
# may cause certain modules to drop below the min_num_ixns threshold,
# and removing modules may cause student histories to drop below
# the min_num_ixns threshold
REPEATED_FILTER = 3 # number of times to repeat filtering
history = reduce(
lambda acc, _: filter_history(acc, min_num_ixns=2, max_num_ixns=10000),
range(REPEATED_FILTER), unfiltered_history)
history.data.sort('timestamp', inplace=True)
deck_of_student_item = {}
tlast_of_student_item = {}
nreps_of_student_item = {}
deck = []
tlast = []
nreps = []
for _, ixn in history.data.iterrows():
student_item = (ixn['user_id'], ixn['module_id'])
d = deck_of_student_item.get(student_item, 1)
deck.append(d)
if ixn['outcome']:
deck_of_student_item[student_item] = d + 1
else:
deck_of_student_item[student_item] = max(1, d-1)
n = nreps_of_student_item.get(student_item, 1)
nreps.append(n)
nreps_of_student_item[student_item] = n + 1
tlast.append(tlast_of_student_item.get(student_item, np.nan))
tlast_of_student_item[student_item] = ixn['timestamp']
history.data['deck'] = deck
history.data['nreps'] = nreps
history.data['tlast'] = tlast
# path to pickled interaction history file
history_path = os.path.join('data', 'mnemosyne_history_v2.pkl')
# serialize history
with open(history_path, 'wb') as f:
pickle.dump(history, f, pickle.HIGHEST_PROTOCOL)
```
Explore basic stats about filtered, formatted interaction history
```
# load history from file
with open(history_path, 'rb') as f:
history = pickle.load(f)
df = history.data
print "Number of interactions = %d" % len(df)
print "Number of unique students: %d" % len(df['user_id'].unique())
print "Number of unique assessments: %d" % history.num_assessments()
value_counts = df['outcome'].value_counts()
num_passes = value_counts.get(True, 0)
num_fails = value_counts.get(False, 0)
print "Overall pass rate: %f" % (num_passes / (num_passes + num_fails))
df.sort('timestamp', inplace=True)
v = []
for _, g in df.groupby(['user_id', 'module_id']):
ts = g['timestamp'].values
v.extend([nt-t for t, nt in zip(ts[:-1], ts[1:])])
v = np.array(v)
plt.xlabel('Time between reviews (log10-seconds)')
plt.ylabel('Frequency (number of reviews)')
plt.hist(np.log10(v+1), bins=20)
#plt.savefig(os.path.join('figures', 'mnemosyne', 'time-between-reviews.pdf'))
plt.show()
grouped = df.groupby(['user_id', 'module_id'])
pairs = [x for x, g in grouped if len(g) > 20]
len(pairs)
g = grouped.get_group(random.choice(pairs))
ts = g['timestamp'].values
intervals = [y-x for x, y in zip(ts[:-1], ts[1:])]
plt.xlabel('Number of reviews')
plt.ylabel('Time until next review (seconds)')
plt.title('Review intervals for a single user-item pair')
outcomes = g['outcome'].values
outcomes = outcomes[:-1]
plt.bar(range(len(outcomes)), [max(intervals)] * len(outcomes), width=1, color=['green' if x else 'red' for x in outcomes], alpha=0.25, linewidth=0.)
plt.step(range(len(intervals)+1), intervals+[intervals[-1]], where='post')
plt.yscale('log')
plt.xlim([0, len(intervals)])
plt.ylim([0, max(intervals)])
#plt.savefig(os.path.join('figures', 'mnemosyne', 'review-history-example.pdf'))
plt.show()
counts = df['user_id'].value_counts().values
plt.xlabel('Number of interactions per student')
plt.ylabel('Frequency (number of students)')
plt.hist(counts)
plt.yscale('log')
#plt.savefig(os.path.join('figures', 'mnemosyne', 'num_ixns_per_student.pdf'))
plt.show()
counts = df['module_id'][df['module_type'] == datatools.AssessmentInteraction.MODULETYPE].value_counts().values
plt.xlabel('Number of interactions per item')
plt.ylabel('Frequency (number of items)')
plt.hist(counts)
plt.yscale('log')
#plt.savefig(os.path.join('figures', 'mnemosyne', 'num_ixns_per_item.pdf'))
plt.show()
counts = df.groupby(['user_id', 'module_id']).size().values
plt.xlabel('Number of interactions per student per item')
plt.ylabel('Frequency (number of student-item pairs)')
plt.hist(counts)
plt.yscale('log')
#plt.savefig(os.path.join('figures', 'mnemosyne', 'num_ixns_per_student_per_item.pdf'))
plt.show()
num_students_per_module = [len(group['user_id'].unique()) for _, group in df.groupby('module_id')]
plt.xlabel('Number of students per item')
plt.ylabel('Frequency (number of items)')
plt.hist(num_students_per_module)
plt.yscale('log')
#plt.savefig(os.path.join('figures', 'mnemosyne', 'num-students-per-item.pdf'))
plt.show()
def get_pass_rates(grouped):
"""
Get pass rate for each group
:param pd.GroupBy grouped: A grouped dataframe
:rtype: dict[str, float]
:return: A dictionary mapping group name to pass rate
"""
pass_rates = {}
for name, group in grouped:
vc = group['outcome'].value_counts()
if True not in vc:
pass_rates[name] = 0
else:
pass_rates[name] = vc[True] / len(group)
return pass_rates
grouped = df[df['module_type']==datatools.AssessmentInteraction.MODULETYPE].groupby('user_id')
plt.xlabel('Student pass rate')
plt.ylabel('Frequency (number of students)')
plt.hist(get_pass_rates(grouped).values())
plt.yscale('log')
#plt.savefig(os.path.join('figures', 'mnemosyne', 'student-pass-rates.pdf'))
plt.show()
grouped = df[df['module_type']==datatools.AssessmentInteraction.MODULETYPE].groupby('module_id')
plt.xlabel('Assessment pass rate')
plt.ylabel('Frequency (number of assessments)')
plt.hist(get_pass_rates(grouped).values())
plt.yscale('log')
#plt.savefig(os.path.join('figures', 'mnemosyne', 'assessment-pass-rates.pdf'))
plt.show()
def make_flow_graph(interaction_logs):
"""
Create a graphviz object for the graph of
module transitions across all student paths
:param pd.DataFrame interaction_logs: An interaction history
:rtype pgv.AGraph
:return Graph of module transitions in student paths
"""
G = pgv.AGraph(directed=True)
for module_id in interaction_logs['module_id'].unique():
G.add_node(module_id)
E = defaultdict(set)
grouped = interaction_logs.groupby('user_id')
for student_id, group in grouped:
module_ids_in_student_path = group['module_id']
for source_node, target_node in zip(module_ids_in_student_path[:-1], module_ids_in_student_path[1:]):
if source_node != target_node: # stationary
E[(source_node, target_node)] |= {student_id}
for (source_node, target_node), students_that_made_transition in E.iteritems():
G.add_edge(
source_node,
target_node,
weight=len(students_that_made_transition))
return G
G = make_flow_graph(df)
G.write(os.path.join('figures', 'mnemosyne', 'mnemosyne_flow_graph.dot'))
```
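If you want to render the exported graph directly from Python rather than running Graphviz on the `.dot` file, pygraphviz can lay it out and draw it; the output path below is illustrative:
```
G.layout(prog='dot')
G.draw(os.path.join('figures', 'mnemosyne', 'mnemosyne_flow_graph.png'))
```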
<small><small><i>
All the IPython Notebooks in this lecture series by Dr. Milan Parmar are available @ **[GitHub](https://github.com/milaan9/04_Python_Functions/tree/main/002_Python_Functions_Built_in)**
</i></small></small>
# Python `zip()`
The **`zip()`** function takes iterables (can be zero or more), aggregates them in a tuple, and return it.
**Syntax**:
```python
zip(*iterables)
```
## `zip()` Parameter
| Parameter | Description |
|:----| :--- |
| **`iterables`** | **can be built-in iterables (like: list, string, dict), or user-defined iterables** |
> Recommended Reading: **[Python Iterators, __iter__ and __next__](https://github.com/milaan9/07_Python_Advanced_Topics/blob/main/001_Python_Iterators.ipynb)**
## Return Value from `zip()`
The **`zip()`** function returns an iterator of tuples based on the iterable objects.
* If we do not pass any parameter, **`zip()`** returns an empty iterator
* If a single iterable is passed, **`zip()`** returns an iterator of tuples with each tuple having only one element.
* If multiple iterables are passed, **`zip()`** returns an iterator of tuples with each tuple having elements from all the iterables.
Suppose two iterables are passed to **`zip()`**: one containing three elements and the other containing five. The returned iterator will contain three tuples, because the iterator stops when the shortest iterable is exhausted.
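For instance, a quick check of this truncation rule (the values below are illustrative):
```
letters = ['a', 'b', 'c']
numbers = [1, 2, 3, 4, 5]
print(list(zip(letters, numbers)))   # [('a', 1), ('b', 2), ('c', 3)]
```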
```
# Example 1: Python zip()
number_list = [1, 2, 3]
str_list = ['one', 'two', 'three']
# No iterables are passed
result = zip()
# Converting iterator to list
result_list = list(result)
print(result_list)
# Two iterables are passed
result = zip(number_list, str_list)
# Converting iterator to set
result_set = set(result)
print(result_set)
# Example 2: Different number of iterable elements
numbersList = [1, 2, 3]
str_list = ['one', 'two']
numbers_tuple = ('ONE', 'TWO', 'THREE', 'FOUR')
# Notice, the size of numbersList and numbers_tuple is different
result = zip(numbersList, numbers_tuple)
# Converting to set
result_set = set(result)
print(result_set)
result = zip(numbersList, str_list, numbers_tuple)
# Converting to set
result_set = set(result)
print(result_set)
```
The **`*`** operator can be used in conjunction with **`zip()`** to unzip the list.
```python
zip(*zippedList)
```
```
# Example 3: Unzipping the Value Using zip()
coordinate = ['x', 'y', 'z']
value = [3, 4, 5]
result = zip(coordinate, value)
result_list = list(result)
print(result_list)
c, v = zip(*result_list)
print('c =', c)
print('v =', v)
```
|
github_jupyter
|
zip(*iterables)
# Example 1: Python zip()
number_list = [1, 2, 3]
str_list = ['one', 'two', 'three']
# No iterables are passed
result = zip()
# Converting itertor to list
result_list = list(result)
print(result_list)
# Two iterables are passed
result = zip(number_list, str_list)
# Converting itertor to set
result_set = set(result)
print(result_set)
# Example 2: Different number of iterable elements
numbersList = [1, 2, 3]
str_list = ['one', 'two']
numbers_tuple = ('ONE', 'TWO', 'THREE', 'FOUR')
# Notice, the size of numbersList and numbers_tuple is different
result = zip(numbersList, numbers_tuple)
# Converting to set
result_set = set(result)
print(result_set)
result = zip(numbersList, str_list, numbers_tuple)
# Converting to set
result_set = set(result)
print(result_set)
zip(*zippedList)
# Example 3: Unzipping the Value Using zip()
coordinate = ['x', 'y', 'z']
value = [3, 4, 5]
result = zip(coordinate, value)
result_list = list(result)
print(result_list)
c, v = zip(*result_list)
print('c =', c)
print('v =', v)
| 0.618435 | 0.939969 |
```
!pip3 install pprint
from pprint import pprint
!pip3 install beautifulsoup4
!pip3 install --upgrade pip
!pip3 install requests
from six.moves.urllib.parse import quote as _quote
from bs4 import BeautifulSoup as _BeautifulSoup
import requests as _requests
import urllib
import os
import pandas as pd
__BASE_URL__ = 'https://lyrics.fandom.com'
class LyricsNotFound(Exception):
__module__ = Exception.__module__
def __init__(self, message=None):
super(LyricsNotFound, self).__init__(message)
class LanguageNotFound(Exception):
__module__ = Exception.__module__
def __init__(self, message=None):
super(LanguageNotFound, self).__init__(message)
def urlize(string):
"""Convert string to LyricWikia format"""
return _quote('_'.join(string.split()))
def create_url(artist, song, language):
"""Create the URL in the LyricWikia format"""
url = __BASE_URL__ + '/wiki/{artist}:{song}'.format(artist=urlize(artist), song=urlize(song))
if language:
url += '/{language}'.format(language=urlize(language).lower())
return url
def get_lyrics_for_all_languages(artist, song, linesep='\n', timeout=None):
"""Retrieve the lyrics of the song in all languages available"""
url = create_url(artist, song, '')
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
lyricboxes = soup.find('table', {'class': 'banner banner-song'})
result = dict()
result['default'] = get_lyrics_by_language(artist, song, '', linesep='\n', timeout=None)
for a in lyricboxes.findAll('a', href=True):
result[a.getText()] = get_lyrics_by_language(artist, song, a['href'].split('/')[-1], linesep='\n', timeout=None)
return result
def get_lyrics_by_language(artist, song, language, linesep='\n', timeout=None):
"""Retrieve the lyrics of the song in a particular language and return the first one in case
multiple versions are available."""
return get_all_lyrics(artist, song, language, linesep, timeout)[0]
def get_lyrics(artist, song, language='', linesep='\n', timeout=None):
"""Retrieve the lyrics of the song and return the first one in case
multiple versions are available."""
return get_all_lyrics(artist, song, language, linesep, timeout)[0]
def get_all_lyrics(artist, song, language='', linesep=' \n ', timeout=None):
"""Retrieve a list of all the lyrics versions of a song."""
url = create_url(artist, song, language)
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
lyricboxes = soup.findAll('div', {'class': 'lyricbox'})
if not lyricboxes:
raise LyricsNotFound('Cannot download lyrics')
for lyricbox in lyricboxes:
for br in lyricbox.findAll('br'):
br.replace_with(linesep)
return [lyricbox.text.strip() for lyricbox in lyricboxes]
def get_songs_by_artist(artist, linesep=' \n ', timeout=None):
"""Retrieve a dataframe of all lyrics versions of a song."""
df = pd.DataFrame(columns=['Artist', 'Title'])
url = __BASE_URL__+"/Category:Songs_by_"+urlize(artist)
df = parse_page_now(url,df)
return df
def get_songs_by_language(language, linesep=' \n ', timeout=None):
"""Retrieve a dataframe of all lyrics versions of a song."""
df = pd.DataFrame(columns=['Artist', 'Title'])
url = __BASE_URL__+"/wiki/Category:Language/"+language
df = parse_page_now(url,df)
return df
def parse_page_now(url, df, timeout=None):
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
data = soup.findAll('li',attrs={'class':'category-page__member'})
if not data:
raise LanguageNotFound('No such artist')
for div in data:
links = div.findAll('a')
for a in links:
lyric_link = a['href'].replace('/wiki/', '', 1)  # remove the '/wiki/' prefix (str.strip would strip characters, not a prefix)
artist = lyric_link.split(":")[0]
title = lyric_link.split(":")[1]
if(artist == "Category"):
continue
df = df.append({'Artist': artist, 'Title': title}, ignore_index=True)
if(soup.find('div', attrs={'class':'category-page__pagination'}) == None):
return df
next_page_text = soup.find('div', attrs={'class':'category-page__pagination'}).find('a', attrs={'class':'category-page__pagination-next wds-button wds-is-secondary'})
if next_page_text != None:
next_page_url = next_page_text['href']
df = parse_page_now(next_page_url,df)
return df
class Song(object):
"""A Song backed by the LyricWikia API"""
def __init__(self, artist, title):
self.artist = artist
self.title = title
@property
def lyrics(self):
"""Song lyrics obtained by parsing the LyricWikia page"""
return get_lyrics(self.artist, self.title,'')
def __str__(self):
return "Song(artist='%s', title='%s')" % (self.artist, self.title)
def __repr__(self):
return str(self)
class Album(object):
"""An Album backed by the LyricWikia API"""
def __init__(self, artist, album_data):
self.artist = artist
self.title = album_data['album']
self.year = album_data['year']
self.songs = [Song(artist, song) for song in album_data['songs']]
def __str__(self):
return "Album(artist='%s', title='%s')" % (self.artist, self.title)
def __repr__(self):
return str(self)
class Artist(object):
"""An Artist backed by the LyricWikia API"""
__API__ = __BASE_URL__ + '/api.php?fmt=json&func=getArtist&artist={artist}'
def __init__(self, name):
url = self.__API__.format(artist=urlize(name))
data = _requests.get(url).json()
self.name = data['artist']
self.albums = [Album(self.name, album) for album in data['albums']]
def __str__(self):
return "Artist(name='%s')" % (self.name)
def __repr__(self):
return str(self)
artist_name = "Mondstille"
df = pd.DataFrame(columns=['Artist', 'Title'])
url = "https://lyrics.fandom.com/wiki/Category:Songs_by_"+artist_name
df = parse_page_now(url,df)
get_songs_by_artist("Mondstille")
get_lyrics_by_language("PSY", "Gangnam Style", "roman")
```
|
github_jupyter
|
!pip3 install pprint
from pprint import pprint
!pip3 install beautifulsoup4
!pip3 install --upgrade pip
!pip3 install requests
from six.moves.urllib.parse import quote as _quote
from bs4 import BeautifulSoup as _BeautifulSoup
import requests as _requests
import urllib
import os
import pandas as pd
__BASE_URL__ = 'https://lyrics.fandom.com'
class LyricsNotFound(Exception):
__module__ = Exception.__module__
def __init__(self, message=None):
super(LyricsNotFound, self).__init__(message)
class LanguageNotFound(Exception):
__module__ = Exception.__module__
def __init__(self, message=None):
super(LanguageNotFound, self).__init__(message)
def urlize(string):
"""Convert string to LyricWikia format"""
return _quote('_'.join(string.split()))
def create_url(artist, song, language):
"""Create the URL in the LyricWikia format"""
url = __BASE_URL__ + '/wiki/{artist}:{song}'.format(artist=urlize(artist), song=urlize(song))
if language:
url += '/{language}'.format(language=urlize(language).lower())
return url
def get_lyrics_for_all_languages(artist, song, linesep='\n', timeout=None):
"""Retrieve the lyrics of the song in all languages available"""
url = create_url(artist, song, '')
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
lyricboxes = soup.find('table', {'class': 'banner banner-song'})
result = dict()
result['default'] = get_lyrics_by_language(artist, song, '', linesep='\n', timeout=None)
for a in lyricboxes.findAll('a', href=True):
result[a.getText()] = get_lyrics_by_language(artist, song, a['href'].split('/')[-1], linesep='\n', timeout=None)
return result
def get_lyrics_by_language(artist, song, language, linesep='\n', timeout=None):
"""Retrieve the lyrics of the song in a particular language and return the first one in case
multiple versions are available."""
return get_all_lyrics(artist, song, language, linesep, timeout)[0]
def get_lyrics(artist, song, language='', linesep='\n', timeout=None):
"""Retrieve the lyrics of the song and return the first one in case
multiple versions are available."""
return get_all_lyrics(artist, song, language, linesep, timeout)[0]
def get_all_lyrics(artist, song, language='', linesep=' \n ', timeout=None):
"""Retrieve a list of all the lyrics versions of a song."""
url = create_url(artist, song, language)
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
lyricboxes = soup.findAll('div', {'class': 'lyricbox'})
if not lyricboxes:
raise LyricsNotFound('Cannot download lyrics')
for lyricbox in lyricboxes:
for br in lyricbox.findAll('br'):
br.replace_with(linesep)
return [lyricbox.text.strip() for lyricbox in lyricboxes]
def get_songs_by_artist(artist, linesep=' \n ', timeout=None):
"""Retrieve a dataframe of all lyrics versions of a song."""
df = pd.DataFrame(columns=['Artist', 'Title'])
url = __BASE_URL__+"/Category:Songs_by_"+urlize(artist)
df = parse_page_now(url,df)
return df
def get_songs_by_language(language, linesep=' \n ', timeout=None):
"""Retrieve a dataframe of all lyrics versions of a song."""
df = pd.DataFrame(columns=['Artist', 'Title'])
url = __BASE_URL__+"/wiki/Category:Language/"+language
df = parse_page_now(url,df)
return df
def parse_page_now(url, df, timeout=None):
response = _requests.get(url, timeout=timeout)
soup = _BeautifulSoup(response.content, "html.parser")
data = soup.findAll('li',attrs={'class':'category-page__member'})
if not data:
raise LanguageNotFound('No such artist')
for div in data:
links = div.findAll('a')
for a in links:
lyric_link = a['href'].strip('/wiki/')
artist = lyric_link.split(":")[0]
title = lyric_link.split(":")[1]
if(artist == "Category"):
continue
df = df.append({'Artist': artist, 'Title': title}, ignore_index=True)
if(soup.find('div', attrs={'class':'category-page__pagination'}) == None):
return df
next_page_text = soup.find('div', attrs={'class':'category-page__pagination'}).find('a', attrs={'class':'category-page__pagination-next wds-button wds-is-secondary'})
if next_page_text != None:
next_page_url = next_page_text['href']
df = parse_page_now(next_page_url,df)
return df
class Song(object):
"""A Song backed by the LyricWikia API"""
def __init__(self, artist, title):
self.artist = artist
self.title = title
@property
def lyrics(self):
"""Song lyrics obtained by parsing the LyricWikia page"""
return get_lyrics(self.artist, self.title,'')
def __str__(self):
return "Song(artist='%s', title='%s')" % (self.artist, self.title)
def __repr__(self):
return str(self)
class Album(object):
"""An Album backed by the LyricWikia API"""
def __init__(self, artist, album_data):
self.artist = artist
self.title = album_data['album']
self.year = album_data['year']
self.songs = [Song(artist, song) for song in album_data['songs']]
def __str__(self):
return "Album(artist='%s', title='%s')" % (self.artist, self.title)
def __repr__(self):
return str(self)
class Artist(object):
"""An Artist backed by the LyricWikia API"""
__API__ = __BASE_URL__ + '/api.php?fmt=json&func=getArtist&artist={artist}'
def __init__(self, name):
url = self.__API__.format(artist=urlize(name))
data = _requests.get(url).json()
self.name = data['artist']
self.albums = [Album(self.name, album) for album in data['albums']]
def __str__(self):
return "Artist(name='%s')" % (self.name)
def __repr__(self):
return str(self)
artist_name = "Mondstille"
df = pd.DataFrame(columns=['Artist', 'Title'])
url = "https://lyrics.fandom.com/wiki/Category:Songs_by_"+artist_name
df = parse_page_now(url,df)
get_songs_by_artist("Mondstille")
get_lyrics_by_language("PSY", "Gangnam Style", "roman")
| 0.546738 | 0.154312 |
# Author : Pulla Nagendra Babu
## Task 2 : Prediction using Unsupervised Machine Learning
## GRIP @ The Sparks Foundation
In this K-means clustering task I try to determine the optimum number of clusters for the given ‘Iris’ dataset and represent the result visually.
## Technical Stack : Scikit-Learn, NumPy, SciPy, Pandas, Matplotlib
```
from sklearn import datasets
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.patches as mpatches
import sklearn.metrics as sm
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import linkage,dendrogram
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
```
## Step 1 - Loading the dataset
```
iris = datasets.load_iris()
print(iris.data)
print(iris.target_names)
print(iris.target)
x = iris.data
y = iris.target
```
## Step 2 - Visualizing the input data and its Hierarchy
```
#Plotting
fig = plt.figure(1, figsize=(7,5))
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
ax.scatter(x[:, 3], x[:, 0], x[:, 2], edgecolor="k", s=50)
ax.set_xlabel("Petal width")
ax.set_ylabel("Sepal length")
ax.set_zlabel("Petal length")
plt.title("Iris Clustering K Means=3", fontsize=14)
plt.show()
# Hierarchical clustering
hier=linkage(x,"ward")
max_d=7.08
plt.figure(figsize=(15,8))
plt.title('Iris Hierarchical Clustering Dendrogram')
plt.xlabel('Species')
plt.ylabel('distance')
dendrogram(
hier,
truncate_mode='lastp',
p=50,
leaf_rotation=90.,
leaf_font_size=8.,
)
plt.axhline(y=max_d, c='k')
plt.show()
```
## Step 3 - Data Preprocessing
```
x = pd.DataFrame(iris.data, columns=['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'])
y = pd.DataFrame(iris.target, columns=['Target'])
x.head()
y.head()
```
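The task statement mentions finding the optimum number of clusters, while the notebook fits k = 3 directly. The cell below is an added sketch of the elbow method that supports that choice; it is an illustration, not part of the original submission.
```
# Elbow method sketch: inertia (within-cluster sum of squares) for k = 1..10
inertias = []
for k in range(1, 11):
    model = KMeans(n_clusters=k, random_state=0)
    model.fit(x)
    inertias.append(model.inertia_)

plt.figure(figsize=(8, 4))
plt.plot(range(1, 11), inertias, marker='o')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Inertia')
plt.title('Elbow method for the Iris data')
plt.show()
```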
## Step 4 - Model Training
```
iris_k_mean_model = KMeans(n_clusters=3)
iris_k_mean_model.fit(x)
print(iris_k_mean_model.labels_)
print(iris_k_mean_model.cluster_centers_)
```
## Step 5 - Visualizing the Model Cluster
```
plt.figure(figsize=(14,6))
colors = np.array(['red', 'green', 'blue'])
predictedY = np.choose(iris_k_mean_model.labels_, [1, 0, 2]).astype(np.int64)  # remap cluster labels (0->1, 1->0, 2->2) to line up with the target encoding; the mapping depends on the particular k-means run
plt.subplot(1, 2, 1)
plt.scatter(x['Petal Length'], x['Petal Width'], c=colors[y['Target']])
plt.title('Before classification')
plt.subplot(1, 2, 2)
plt.scatter(x['Petal Length'], x['Petal Width'], c=colors[predictedY])
plt.title("Model's classification")
```
## Step 6 - Calculating the Accuracy and Confusion Matrix
```
sm.accuracy_score(predictedY, y['Target'])
sm.confusion_matrix(predictedY, y['Target'])
```
In a confusion matrix, one axis corresponds to the true class labels (Iris-setosa, etc.) and the other to the predicted labels (0, 1, 2); each cell counts how many instances of a true class were assigned to each predicted class.
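To make that layout explicit, a labelled cross-tabulation can be printed alongside the raw matrix (an added illustration that reuses the variables defined above):
```
# Labelled confusion table: rows are the true species, columns are the predicted cluster labels
labelled_cm = pd.crosstab(y['Target'].map(dict(enumerate(iris.target_names))),
                          predictedY,
                          rownames=['True species'],
                          colnames=['Predicted label'])
print(labelled_cm)
```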
## Conclusion
### I was able to successfully carry out this unsupervised machine learning prediction task and to evaluate the model's clustering accuracy score.
# Thank You
|
github_jupyter
|
from sklearn import datasets
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.patches as mpatches
import sklearn.metrics as sm
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.hierarchy import linkage,dendrogram
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
iris = datasets.load_iris()
print(iris.data)
print(iris.target_names)
print(iris.target)
x = iris.data
y = iris.target
#Plotting
fig = plt.figure(1, figsize=(7,5))
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
ax.scatter(x[:, 3], x[:, 0], x[:, 2], edgecolor="k", s=50)
ax.set_xlabel("Petal width")
ax.set_ylabel("Sepal length")
ax.set_zlabel("Petal length")
plt.title("Iris Clustering K Means=3", fontsize=14)
plt.show()
#Hierachy Clustering
hier=linkage(x,"ward")
max_d=7.08
plt.figure(figsize=(15,8))
plt.title('Iris Hierarchical Clustering Dendrogram')
plt.xlabel('Species')
plt.ylabel('distance')
dendrogram(
hier,
truncate_mode='lastp',
p=50,
leaf_rotation=90.,
leaf_font_size=8.,
)
plt.axhline(y=max_d, c='k')
plt.show()
x = pd.DataFrame(iris.data, columns=['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width'])
y = pd.DataFrame(iris.target, columns=['Target'])
x.head()
y.head()
iris_k_mean_model = KMeans(n_clusters=3)
iris_k_mean_model.fit(x)
print(iris_k_mean_model.labels_)
print(iris_k_mean_model.cluster_centers_)
plt.figure(figsize=(14,6))
colors = np.array(['red', 'green', 'blue'])
predictedY = np.choose(iris_k_mean_model.labels_, [1, 0, 2]).astype(np.int64)
plt.subplot(1, 2, 1)
plt.scatter(x['Petal Length'], x['Petal Width'], c=colors[y['Target']])
plt.title('Before classification')
plt.subplot(1, 2, 2)
plt.scatter(x['Petal Length'], x['Petal Width'], c=colors[predictedY])
plt.title("Model's classification")
sm.accuracy_score(predictedY, y['Target'])
sm.confusion_matrix(predictedY, y['Target'])
| 0.740268 | 0.987079 |
# The objective of this Jupyter Notebook is to answer the following question:
## 1. What is the best time of year to travel to Seattle and find a good price for an Airbnb?
# Data preparation
### The objective of this notebook is to identify at which time of the year it is most expensive to rent an Airbnb in Seattle
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
%matplotlib inline
seattle_calendar = pd.read_csv('./Seattle/calendar.csv')
seattle_calendar.head()
```
#### We have a couple of problems here: our price is a string prefixed with the dollar character "$", and our date is a string that we have to convert to a date.
```
seattle_calendar["price"].value_counts
```
#### We have to convert the "date" column in our dataset to datetime because it is currently a string.
```
seattle_calendar["date_format"] = pd.to_datetime(seattle_calendar['date'], format='%Y-%m-%d')
seattle_calendar.head()
```
#### The next step is to remove the dollar character from the price. To do this, we will split the price into two columns, insert just the numeric part back into our dataset, and then convert that value to float.
```
seattle_calendar_splited = seattle_calendar
price_splited= seattle_calendar_splited["price"].str.split("$", n = 1, expand = True)
seattle_calendar_splited["total_price"] = price_splited[1]
seattle_calendar_splited["total_price"] = seattle_calendar_splited["total_price"].str.replace(',', '').astype(float)
seattle_calendar_splited.head()
```
#### Now we have our price in a new column called total_price, with the type float.
```
seattle_calendar_splited = seattle_calendar_splited.drop(columns=["listing_id","date","available","minimum_nights","maximum_nights"])
seattle_calendar_splited.head()
seattle_calendar_splited.total_price.mean()
```
#### We have to verify the percentage of NA's in our dataset.
```
seattle_calendar_splited['date_format'].isna().mean()
```
#### And plot our time series using a 10-day rolling mean of the price.
```
seattle_calendar_splited_date = seattle_calendar_splited
seattle_calendar_splited_date['date_format'] = seattle_calendar_splited_date.apply(lambda row: pd.Timestamp(row.date_format), axis=1 )
seattle_calendar_splited_date = seattle_calendar_splited_date.set_index(seattle_calendar_splited_date.date_format).drop('date_format', axis=1)
seattle_calendar_splited_date.sort_index(inplace=True)
seattle_calendar_splited_date.rolling('10d').mean().plot(figsize=(20,20),ylim =(0, 200), grid='true')
```
#### As a conclusion, we can say that December is the most expensive month of 2019.
#### For 2020, the cost of an Airbnb in Seattle increases by about 25 dollars.
#### The most expensive month of 2020 will be August. We have to mention that our dataset does not cover the whole of 2020.
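These monthly figures can be checked directly from the cleaned calendar data; the snippet below is a small added sketch that reuses the `seattle_calendar_splited_date` frame built above.
```
# Average nightly price per calendar month, sorted from most to least expensive
monthly_mean = (seattle_calendar_splited_date['total_price']
                .resample('M')
                .mean())
print(monthly_mean.sort_values(ascending=False).head(12))
```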
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
%matplotlib inline
seattle_calendar = pd.read_csv('./Seattle/calendar.csv')
seattle_calendar.head()
seattle_calendar["price"].value_counts
seattle_calendar["date_format"] = pd.to_datetime(seattle_calendar['date'], format='%Y-%m-%d')
seattle_calendar.head()
seattle_calendar_splited = seattle_calendar
price_splited= seattle_calendar_splited["price"].str.split("$", n = 1, expand = True)
seattle_calendar_splited["total_price"] = price_splited[1]
seattle_calendar_splited["total_price"] = seattle_calendar_splited["total_price"].str.replace(',', '').astype(float)
seattle_calendar_splited.head()
seattle_calendar_splited = seattle_calendar_splited.drop(columns=["listing_id","date","available","minimum_nights","maximum_nights"])
seattle_calendar_splited.head()
seattle_calendar_splited.total_price.mean()
seattle_calendar_splited['date_format'].isna().mean()
seattle_calendar_splited_date = seattle_calendar_splited
seattle_calendar_splited_date['date_format'] = seattle_calendar_splited_date.apply(lambda row: pd.Timestamp(row.date_format), axis=1 )
seattle_calendar_splited_date = seattle_calendar_splited_date.set_index(seattle_calendar_splited_date.date_format).drop('date_format', axis=1)
seattle_calendar_splited_date.sort_index(inplace=True)
seattle_calendar_splited_date.rolling('10d').mean().plot(figsize=(20,20),ylim =(0, 200), grid='true')
| 0.528777 | 0.918334 |
# The gradient-regulated asymptotic correction (GRAC)
## 1. The ionization potential issue
Often with density functional theory one would like to do something interesting with the resulting wavefunction; however, a number of issues exist with conventional DFT computations.
Let us first compute the ionization potential at several different levels of theory and then compare the results to the NIST database (http://physics.nist.gov).
```
import psi4
import numpy as np
import ks_helper as ks
%matplotlib inline
import matplotlib.pyplot as plt
from pkg_resources import parse_version
if parse_version(psi4.__version__) >= parse_version('1.3a1'):
build_superfunctional = psi4.driver.dft.build_superfunctional
else:
build_superfunctional = psi4.driver.dft_funcs.build_superfunctional
mol = psi4.geometry("""
Ne
symmetry c1
""")
options = {'BASIS': 'aug-cc-pVDZ',
'NAT_ORBS': True,
'NUM_ROOTS': 2,
'QC_MODULE': 'DETCI'}
psi4.set_options(options)
scf_e, scf_wfn = psi4.energy("SCF", return_wfn=True)
scf_e, dft_wfn = psi4.energy("PBE0", return_wfn=True)
scf_e, ci_wfn = psi4.energy("CISD", return_wfn=True)
```
Now that we have the wavefunctions we can compute the ionization potential. For SCF theories this is the negative of the HOMO energy, while for more advanced theories it is the difference between the ground and excited state energies. We can obtain the IP for each level of theory in the following way:
```
scf_ip = -1 * scf_wfn.epsilon_a().np[scf_wfn.nalpha()-1] * psi4.constants.hartree2ev
dft_ip = -1 * dft_wfn.epsilon_a().np[dft_wfn.nalpha()-1] * psi4.constants.hartree2ev
ci_ip = -1 * (psi4.core.variable("CI ROOT 0 TOTAL ENERGY") - psi4.core.variable("CI ROOT 1 TOTAL ENERGY")) * psi4.constants.hartree2ev
print("Neon Ionization Potential (eV)")
print("SCF %10.4f" % scf_ip)
print("PBE0 %10.4f" % dft_ip)
print("CISD %10.4f" % ci_ip)
print("NIST %10.4f" % 21.5645)
```
## 2. Density shapes
If we think carefully about the ionization potential and what it really means, it can be summed up succinctly as "How hard is it for an electron to leave the system?" or "How deep is our potential well?". An easy way to view the shape of the potential for a given method is to inspect the density as a function of spatial extent. A weaker potential means that the density extends further out than it should and our IP comes out too small, while a stronger potential means that the density is more concentrated and the IP is too large. As we only have a single atom, we can project our density onto grid space and then sort the results as a function of distance from the nucleus.
```
def one_electron_radial(matrices, Vpot):
"""
For every matrix in matrices, computes the radial density using the incoming Vpot object.
"""
results = [[] for x in matrices]
Rlist = []
points_func = Vpot.properties()[0]
superfunc = Vpot.functional()
xc_e = 0.0
vv10_e = 0.0
# First loop over the outer set of blocks
for block in range(Vpot.nblocks()):
# Obtain general grid information
grid = Vpot.get_block(block)
x = np.array(grid.x())
y = np.array(grid.y())
z = np.array(grid.z())
npoints = z.shape[0]
# Get radial information
R = ((x ** 2) + (y ** 2) + (z ** 2)) ** 0.5
Rlist.append(R)
# Loop over and compute the density for each matrix
for num, mat in enumerate(matrices):
points_func.set_pointers(mat)
points_func.compute_points(grid)
results[num].append(np.array(points_func.point_values()["RHO_A"])[:npoints])
# Stack R
Rlist = np.hstack(Rlist)
# Get the R unique and sort
unique, uidx = np.unique(Rlist, return_index=True)
Rlist = Rlist[uidx]
# Numpy is magic, it would be worthwhile to consider exactly what this does
results = [np.hstack(x)[uidx] for x in results]
return (Rlist, results)
# Compute the radial densities
sup = build_superfunctional("SVWN", True)[0]
Vpot = psi4.core.VBase.build(scf_wfn.basisset(), sup, "RV")
Vpot.initialize()
R, data = one_electron_radial([scf_wfn.Da(), dft_wfn.Da(), ci_wfn.Da()], Vpot)
```
Now that we have the radial densities, we build a function that plots the density as a ratio between
each method and a benchmark. Here we select the CISD density as the benchmark and plot the SCF and DFT quantities against it.
```
def plot_scatter_ratio(dictionary, bench, x_values):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
for k, v in dictionary.items():
ax1.plot(x_values, v / bench, label=k)
ax1.legend(loc='upper left')
ax1.set_xlim(0, 7)
ax1.set_xlabel('R')
ax1.set_ylabel('rho(r) / rho(r)_bench')
plt.plot([0, 7], [1.0, 1.0], 'k-')
```
Plotting this data we note that the SCF density falls off too rapidly, meaning that the potential is too deep, while the DFT density falls off far too slowly, indicating that the potential is too shallow. For example, many theories examine the overlap between densities; from this chart we can surmise the issues that would arise with both DFT and HF theories in such applications.
```
data_dict = {"SCF": data[0], "DFT": data[1]}
plot_scatter_ratio(data_dict, data[2], R)
```
## 3. The GRAC Correction
The question then arises: what if we could correct the asymptotics of the DFT potential so that it recovers the correct density and therefore the correct ionization potential? Effectively we want to accomplish:
$$v_{\rm{xc}}({\bf r}) = -\frac{1}{r} + v_{\rm{xc}}(\infty)$$
However, for correct asymptotics this would require the exact $v_{\rm{xc}}$ potential. As we operate completely within the density functional approximation domain, this is not possible. Instead, the GRAC correction requires that we know the ionization potential, and we can replace the complex DFT term with the following:
$$v_{\rm{xc}}({\bf r}) = -\frac{1}{r} + (I_{\bf P} + \epsilon_N)$$
where $\epsilon_N$ is our HOMO energy. This is quite clever, as we now only need a functional that correctly goes to the $\frac{1}{r}$ asymptotic form. As no general functional accomplishes this task, a special functional, "LB94", was built with exactly this property. The downside is that this functional's characteristics are quite poor in the "bulk" region, that is, the region near the atom itself. To generalize this, a switching function is introduced:
$$
\begin{align}
v_{\rm{xc}}^{GRAC}(\rho({\bf r})) &= [1 - f[x({\bf r})]]v_{\rm{XC}}^{\rm bulk}+ f[x({\bf r})]v_{\rm{XC}}^{\rm Asym.} \\ \\
f[x({\bf r})] &= \frac{1}{1 + e^{-\alpha[x({\bf r}) - \beta]}}\\\\
x({\bf r}) &= \frac{| \nabla \rho({\bf r}) | }{\rho^\frac{4}{3}({\bf r})}
\end{align}
$$
Thus, in such a way we seamlessly switch between the bulk and asymptotic potentials. In addition, such a formulation allows us to apply the GRAC correction to any underlying functional in a general way.
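Before looking at the corrected potential itself, it helps to see the switching function on its own. The short block below is an illustrative addition (not part of the original tutorial) that simply plots $f[x]$ using the same $\alpha=0.5$ and $\beta=40$ values that appear in the implementation further down:
```
grac_alpha, grac_beta = 0.5, 40.0
x_vals = np.linspace(0, 80, 400)
f_switch = 1.0 / (1.0 + np.exp(-grac_alpha * (x_vals - grac_beta)))

plt.plot(x_vals, f_switch)
plt.xlabel('x(r) = |grad rho| / rho^(4/3)')
plt.ylabel('f[x]')
plt.title('GRAC switching function')
plt.show()
```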
```
# Build a LB 94 functional
lb94_func = psi4.core.SuperFunctional.blank()
lb94_func.set_max_points(500000)
lb94_func.set_deriv(1)
# Add member functionals
tmp_func = psi4.core.LibXCFunctional('XC_GGA_X_LB', True)
# We need to scale this by the amount of exact exchange in the functional
tmp_func.set_alpha(0.75)
lb94_func.add_x_functional(tmp_func)
# Dont forget VWN correlation!
tmp_func = psi4.core.LibXCFunctional('XC_LDA_C_VWN', True)
lb94_func.add_c_functional(tmp_func)
# Allocate the functional
lb94_func.allocate()
grac_shift = (21.5645 - dft_ip) / psi4.constants.hartree2ev
def compute_V_GRAC(D, Vpot):
"""
Computes the XC energy and the V_xc Fock term
"""
grac_alpha = 0.5
grac_beta = 40.0
nbf = D.shape[0]
Varr = np.zeros((nbf, nbf))
points_func = Vpot.properties()[0]
superfunc = Vpot.functional()
total_e = 0.0
for x in range(Vpot.nblocks()):
# Grid data
grid = Vpot.get_block(x)
w = np.array(grid.w())
npoints = w.shape[0]
points_func.compute_points(grid)
lpos = np.array(grid.functions_local_to_global())
nfunctions = lpos.shape[0]
phi = np.array(points_func.basis_values()["PHI"])[:npoints, :nfunctions]
rho = np.array(points_func.point_values()["RHO_A"])[:npoints]
gamma = np.array(points_func.point_values()["GAMMA_AA"])[:npoints]
grac_x = (gamma ** 0.5) / (rho ** (4.0/3.0))
grac_fx = 1.0 / (1.0 + np.exp(-grac_alpha * (grac_x - grac_beta)))
ret = superfunc.compute_functional(points_func.point_values(), -1)
lbret = lb94_func.compute_functional(points_func.point_values(), -1)
total_e += np.vdot(np.array(ret["V"])[:npoints], w)
v_rho_a = ( 1 - grac_fx ) * (np.array(ret["V_RHO_A"])[:npoints] - grac_shift)
v_rho_a += grac_fx * np.array(lbret["V_RHO_A"])[:npoints]
v_rho_a *= 0.5 * w
Vtmp = np.einsum('pb,p,pa->ab', phi, v_rho_a, phi)
# GGA
rho_x = np.array(points_func.point_values()["RHO_AX"])[:npoints]
rho_y = np.array(points_func.point_values()["RHO_AY"])[:npoints]
rho_z = np.array(points_func.point_values()["RHO_AZ"])[:npoints]
phi_x = np.array(points_func.basis_values()["PHI_X"])[:npoints, :nfunctions]
phi_y = np.array(points_func.basis_values()["PHI_Y"])[:npoints, :nfunctions]
phi_z = np.array(points_func.basis_values()["PHI_Z"])[:npoints, :nfunctions]
v_gamma = (1 - grac_fx) * np.array(ret["V_GAMMA_AA"])[:npoints]
v_gamma *= 2.0 * w
Vtmp += np.einsum('pb,p,p,pa->ab', phi, v_gamma, rho_x, phi_x)
Vtmp += np.einsum('pb,p,p,pa->ab', phi, v_gamma, rho_y, phi_y)
Vtmp += np.einsum('pb,p,p,pa->ab', phi, v_gamma, rho_z, phi_z)
Varr[(lpos[:, None], lpos)] += Vtmp + Vtmp.T
return total_e, Varr
# Test our function with the handy ks solver.
grac_e, grac_data = ks.ks_solver("PBE0", mol, options, compute_V_GRAC)
```
We can now check against the Psi4 reference implementation. Note that we set the GRAC shift before the computation and reset it afterwards so that we do not contaminate other computations.
```
psi4.set_options({"dft_grac_shift": grac_shift})
print(psi4.energy("PBE0"))
psi4.set_options({"dft_grac_shift": 0.0})
```
## 4. Inspecting the outcome
Now that we have applied the GRAC shift we can observe the density and see how well we do.
```
grac_ip = -1 * grac_data["eigenvalues"].np[dft_wfn.nalpha()-1] * psi4.constants.hartree2ev
print("Neon Ionization Potential (eV)")
print("SCF %10.4f" % scf_ip)
print("PBE0 %10.4f" % dft_ip)
print("PBE0-AC %10.4f" % grac_ip)
print("CISD %10.4f" % ci_ip)
print("NIST %10.4f" % 21.5645)
```
We observe that our ionization potential is now much closer to the correct ionization potential, and while the density is not perfect, it is much closer to the CISD density.
```
R, data = one_electron_radial([scf_wfn.Da(), dft_wfn.Da(), grac_data["Da"], ci_wfn.Da()], Vpot)
data_dict = {"SCF": data[0], "DFT": data[1], "DFT-AC": data[2]}
plot_scatter_ratio(data_dict, data[3], R)
```
Refs:
- M. Grüning, O. V. Gritsenko, S. J. A. van Gisbergen, and E. J. Baerends, *J. Chem. Phys.*, **2001**, *114*, 652
|
github_jupyter
|
import psi4
import numpy as np
import ks_helper as ks
%matplotlib inline
import matplotlib.pyplot as plt
from pkg_resources import parse_version
if parse_version(psi4.__version__) >= parse_version('1.3a1'):
build_superfunctional = psi4.driver.dft.build_superfunctional
else:
build_superfunctional = psi4.driver.dft_funcs.build_superfunctional
mol = psi4.geometry("""
Ne
symmetry c1
""")
options = {'BASIS': 'aug-cc-pVDZ',
'NAT_ORBS': True,
'NUM_ROOTS': 2,
'QC_MODULE': 'DETCI'}
psi4.set_options(options)
scf_e, scf_wfn = psi4.energy("SCF", return_wfn=True)
scf_e, dft_wfn = psi4.energy("PBE0", return_wfn=True)
scf_e, ci_wfn = psi4.energy("CISD", return_wfn=True)
scf_ip = -1 * scf_wfn.epsilon_a().np[scf_wfn.nalpha()-1] * psi4.constants.hartree2ev
dft_ip = -1 * dft_wfn.epsilon_a().np[dft_wfn.nalpha()-1] * psi4.constants.hartree2ev
ci_ip = -1 * (psi4.core.variable("CI ROOT 0 TOTAL ENERGY") - psi4.core.variable("CI ROOT 1 TOTAL ENERGY")) * psi4.constants.hartree2ev
print("Neon Ionization Potential (eV)")
print("SCF %10.4f" % scf_ip)
print("PBE0 %10.4f" % dft_ip)
print("CISD %10.4f" % ci_ip)
print("NIST %10.4f" % 21.5645)
def one_electron_radial(matrices, Vpot):
"""
For every matrix in matrices, computes the radial density using the incoming Vpot object.
"""
results = [[] for x in matrices]
Rlist = []
points_func = Vpot.properties()[0]
superfunc = Vpot.functional()
xc_e = 0.0
vv10_e = 0.0
# First loop over the outer set of blocks
for block in range(Vpot.nblocks()):
# Obtain general grid information
grid = Vpot.get_block(block)
x = np.array(grid.x())
y = np.array(grid.y())
z = np.array(grid.z())
npoints = z.shape[0]
# Get radial information
R = ((x ** 2) + (y ** 2) + (z ** 2)) ** 0.5
Rlist.append(R)
# Loop over and compute the density for each matrix
for num, mat in enumerate(matrices):
points_func.set_pointers(mat)
points_func.compute_points(grid)
results[num].append(np.array(points_func.point_values()["RHO_A"])[:npoints])
# Stack R
Rlist = np.hstack(Rlist)
# Get the R unique and sort
unique, uidx = np.unique(Rlist, return_index=True)
Rlist = Rlist[uidx]
# Numpy is magic, it would be worthwhile to consider excatly what this does
results = [np.hstack(x)[uidx] for x in results]
return (Rlist, results)
# Compute the radial densities
sup = build_superfunctional("SVWN", True)[0]
Vpot = psi4.core.VBase.build(scf_wfn.basisset(), sup, "RV")
Vpot.initialize()
R, data = one_electron_radial([scf_wfn.Da(), dft_wfn.Da(), ci_wfn.Da()], Vpot)
def plot_scatter_ratio(dictionary, bench, x_values):
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
for k, v in dictionary.items():
ax1.plot(x_values, v / bench, label=k)
ax1.legend(loc='upper left')
ax1.set_xlim(0, 7)
ax1.set_xlabel('R')
ax1.set_ylabel('rho(r) / rho(r)_bench')
plt.plot([0, 7], [1.0, 1.0], 'k-')
data_dict = {"SCF": data[0], "DFT": data[1]}
plot_scatter_ratio(data_dict, data[2], R)
# Build a LB 94 functional
lb94_func = psi4.core.SuperFunctional.blank()
lb94_func.set_max_points(500000)
lb94_func.set_deriv(1)
# Add member functionals
tmp_func = psi4.core.LibXCFunctional('XC_GGA_X_LB', True)
# We need to scale this for the amount of exact exchange is in the functional
tmp_func.set_alpha(0.75)
lb94_func.add_x_functional(tmp_func)
# Dont forget VWN correlation!
tmp_func = psi4.core.LibXCFunctional('XC_LDA_C_VWN', True)
lb94_func.add_c_functional(tmp_func)
# Allocate the functional
lb94_func.allocate()
grac_shift = (21.5645 - dft_ip) / psi4.constants.hartree2ev
def compute_V_GRAC(D, Vpot):
"""
Computes the XC energy and the V_xc Fock term
"""
grac_alpha = 0.5
grac_beta = 40.0
nbf = D.shape[0]
Varr = np.zeros((nbf, nbf))
points_func = Vpot.properties()[0]
superfunc = Vpot.functional()
total_e = 0.0
for x in range(Vpot.nblocks()):
# Grid data
grid = Vpot.get_block(x)
w = np.array(grid.w())
npoints = w.shape[0]
points_func.compute_points(grid)
lpos = np.array(grid.functions_local_to_global())
nfunctions = lpos.shape[0]
phi = np.array(points_func.basis_values()["PHI"])[:npoints, :nfunctions]
rho = np.array(points_func.point_values()["RHO_A"])[:npoints]
gamma = np.array(points_func.point_values()["GAMMA_AA"])[:npoints]
grac_x = (gamma ** 0.5) / (rho ** (4.0/3.0))
grac_fx = 1.0 / (1.0 + np.exp(-grac_alpha * (grac_x - grac_beta)))
ret = superfunc.compute_functional(points_func.point_values(), -1)
lbret = lb94_func.compute_functional(points_func.point_values(), -1)
total_e += np.vdot(np.array(ret["V"])[:npoints], w)
v_rho_a = ( 1 - grac_fx ) * (np.array(ret["V_RHO_A"])[:npoints] - grac_shift)
v_rho_a += grac_fx * np.array(lbret["V_RHO_A"])[:npoints]
v_rho_a *= 0.5 * w
Vtmp = np.einsum('pb,p,pa->ab', phi, v_rho_a, phi)
# GGA
rho_x = np.array(points_func.point_values()["RHO_AX"])[:npoints]
rho_y = np.array(points_func.point_values()["RHO_AY"])[:npoints]
rho_z = np.array(points_func.point_values()["RHO_AZ"])[:npoints]
phi_x = np.array(points_func.basis_values()["PHI_X"])[:npoints, :nfunctions]
phi_y = np.array(points_func.basis_values()["PHI_Y"])[:npoints, :nfunctions]
phi_z = np.array(points_func.basis_values()["PHI_Z"])[:npoints, :nfunctions]
v_gamma = (1 - grac_fx) * np.array(ret["V_GAMMA_AA"])[:npoints]
v_gamma *= 2.0 * w
Vtmp += np.einsum('pb,p,p,pa->ab', phi, v_gamma, rho_x, phi_x)
Vtmp += np.einsum('pb,p,p,pa->ab', phi, v_gamma, rho_y, phi_y)
Vtmp += np.einsum('pb,p,p,pa->ab', phi, v_gamma, rho_z, phi_z)
Varr[(lpos[:, None], lpos)] += Vtmp + Vtmp.T
return total_e, Varr
# Test our function the the handy ks solver.
grac_e, grac_data = ks.ks_solver("PBE0", mol, options, compute_V_GRAC)
psi4.set_options({"dft_grac_shift": grac_shift})
print(psi4.energy("PBE0"))
psi4.set_options({"dft_grac_shift": 0.0})
grac_ip = -1 * grac_data["eigenvalues"].np[dft_wfn.nalpha()-1] * psi4.constants.hartree2ev
print("Neon Ionization Potential (eV)")
print("SCF %10.4f" % scf_ip)
print("PBE0 %10.4f" % dft_ip)
print("PBE0-AC %10.4f" % grac_ip)
print("CISD %10.4f" % ci_ip)
print("NIST %10.4f" % 21.5645)
R, data = one_electron_radial([scf_wfn.Da(), dft_wfn.Da(), grac_data["Da"], ci_wfn.Da()], Vpot)
data_dict = {"SCF": data[0], "DFT": data[1], "DFT-AC": data[2]}
plot_scatter_ratio(data_dict, data[3], R)
| 0.582847 | 0.925432 |
```
import holoviews as hv
hv.extension('bokeh')
hv.opts.defaults(hv.opts.Curve(width=500),
hv.opts.Points(width=500),
hv.opts.Image(width=500, colorbar=True, cmap='Viridis'))
import numpy as np
import scipy.signal
import scipy.linalg
```
# Adaptive estimators, part I
So far we have studied linear systems where:
- the coefficients are fixed after the design stage and remain constant in time
- assumptions are made about the statistics of the signal/noise
What can we do if
- we cannot make assumptions about the statistics?
- the statistics of the signal/noise change over time?
- we are in a scenario where data arrive continuously (data streaming)?
When the statistics change over time we say that the signal is **non-stationary**.
In these cases we need an **adaptive** estimator, that is, systems and filters whose coefficients can **adapt** as new data arrive. These estimators are designed around an *online* optimization method.
The following figure shows some example applications of adaptive systems
<img src="../images/adaptive-systems1.png" width="700">
The online optimization method that we will mainly use in this course is stochastic gradient descent. Let us now review its fundamentals.
## Gradient descent
Let $w$ be a weight vector of length $L+1$ that holds the coefficients of an estimator
Now consider a cost function that maps the weight vector to a real number
$$
J(w): \mathbb{R}^{L+1} \to \mathbb{R}
$$
The cost function must be such that the smaller $J$ is, the smaller the estimator's error
To train an adaptive estimator or filter we follow these conceptual steps
1. Start from an initial solution $w_0$
1. Iteratively modify $w$ such that $J(w_{t+1}) < J(w_t)$
1. Stop once a certain criterion is met
To modify the weights iteratively and efficiently we use the **gradient descent** (GD) rule
$$
w_{t+1} = w_t - \mu \frac{dJ(w)}{dw},
$$
where $\mu$ is known as the learning rate or "step size"
- Picture $J$ as a surface of $L+1$ dimensions
- At each point, the negative gradient of $J$ points towards the steepest descent
- The rate $\mu$ gives the length of the jump between $w_t$ and $w_{t+1}$
Looking at the **first-order Taylor expansion** of $J$ at $w_{t}$
$$
\begin{align}
J(w_{t+1}) &= J(w_t) + \frac{dJ(w_t)}{dw} (w_{t+1} - w_{t}) \nonumber \\
&= J(w_t) -\mu \left \| \frac{dJ(w_t)}{dw} \right \|^2 \leq J(w_t) \nonumber
\end{align}
$$
we see that using the GD rule with $\mu>0$ and assuming that $J$ is convex, $J$ always decreases monotonically.
The following interactive plots show a non-convex cost surface for a one-dimensional parameter. Each dot represents a solution starting from a different initial position. The arrows correspond to the derivative multiplied by the learning rate.
Study the evolution of the three solutions in each case. First, $\mu=0.05$ is used
```
J = lambda w : (w-1)**2 + 0.2*np.sin(2*np.pi*w) # Cost function
gradJ = lambda w : 2*(w-1) + 0.2*2*np.pi*np.cos(2*np.pi*w) # Gradient
mu = 0.05 # Learning rate
iteraciones = 15
wt = np.zeros(shape=(iteraciones, 3))
wt[0, :] = np.array([0.05, 0.4, 1.9]) # Initial solutions
w_plot = np.linspace(0, 2, num=100)
for k in range(1, iteraciones):
wt[k, :] = wt[k-1, :] - mu*gradJ(wt[k-1, :])
loss_surface = hv.Curve((w_plot, J(w_plot)), 'w', 'J')
hMap = hv.HoloMap(kdims='Iteración')
for k in range(iteraciones):
dots = hv.Points((wt[k, :], J(wt[k, :]))).opts(size=10, color='k')
mag = mu*gradJ(wt[k, :])
angle = np.pi/2 - np.sign(-mag)*np.pi/2
mag = np.abs(mag)
arrows = hv.VectorField((wt[k, :], J(wt[k, :]), angle, mag)).opts(pivot='tail',
magnitude=hv.dim('Magnitude'),
rescale_lengths=False)
hMap[k] = dots * arrows
loss_surface * hMap
```
:::{warning}
Depending on where we start, the final solution is different. Gradient descent can get "stuck" in a local minimum or at a saddle point
:::
Now observe how the three solutions evolve with $\mu=0.5$, i.e. 10 times larger than in the previous case
```
J = lambda w : (w-1)**2 + 0.2*np.sin(2*np.pi*w) # Cost function
gradJ = lambda w : 2*(w-1) + 0.2*2*np.pi*np.cos(2*np.pi*w) # Gradient
mu = 0.5 # Learning rate
iteraciones = 15
wt = np.zeros(shape=(iteraciones, 3))
wt[0, :] = np.array([0.05, 0.4, 1.9]) # Initial solutions
w_plot = np.linspace(0, 2, num=100)
for k in range(1, iteraciones):
wt[k, :] = wt[k-1, :] - mu*gradJ(wt[k-1, :])
loss_surface = hv.Curve((w_plot, J(w_plot)), 'w', 'J')
hMap = hv.HoloMap(kdims='Iteración')
for k in range(iteraciones):
dots = hv.Points((wt[k, :], J(wt[k, :]))).opts(size=10, color='k')
mag = mu*gradJ(wt[k, :])
angle = np.pi/2 - np.sign(-mag)*np.pi/2
mag = np.abs(mag)
arrows = hv.VectorField((wt[k, :], J(wt[k, :]), angle, mag)).opts(pivot='tail',
magnitude=hv.dim('Magnitude'),
rescale_lengths=False)
hMap[k] = dots * arrows
loss_surface * hMap
```
:::{warning}
If the learning rate is too high, the steps are too long and we might not converge to a stationary point
:::
The previous examples have shown some of the limitations of the gradient descent algorithm. It is important to keep them in mind when we use it in our applications
## Gradient descent for the Wiener filter
For the Wiener filter we had
$$
J(h) = \sigma_d^2 - 2 \textbf{h}^T R_{ud} + \textbf{h}^T R_{uu} \textbf{h},
$$
hence
$$
\frac{dJ(h)}{dh} = -2 R_{ud} + 2 R_{uu} \textbf{h}
$$
and finally
$$
\textbf{h}_{t+1} = \textbf{h}_{t} (I - 2 \mu R_{uu}) + 2\mu R_{ud}
$$
In this case the condition for stable convergence is
$$
0 < \mu < \frac{1}{\lambda_{\text{max}}},
$$
where $\lambda_{\text{max}}$ is the largest eigenvalue of $R_{uu}$
(The proof of this can be found in *Haykin, "Adaptive filter theory", Section 4.3*)
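As a quick numerical illustration (a sketch added here, using a small synthetic $R_{uu}$ and $R_{ud}$ rather than quantities estimated from data), the recursion and its stability bound look like this:
```
Ruu = np.array([[1.0, 0.5],
                [0.5, 1.0]])   # assumed autocorrelation matrix for the illustration
Rud = np.array([0.7, 0.3])     # assumed cross-correlation vector

lam_max = np.max(np.linalg.eigvalsh(Ruu))
mu = 0.5 / lam_max             # safely inside the 0 < mu < 1/lambda_max bound

h = np.zeros(2)
for _ in range(200):
    h = h - mu * (2.0 * (Ruu @ h) - 2.0 * Rud)   # gradient descent step
print(h, np.linalg.solve(Ruu, Rud))              # GD solution vs. exact Wiener solution
```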
## Stochastic gradient descent (SGD)
The Wiener filter is optimal but not adaptive:
- It requires $N$ samples of $u$ and $d$ to estimate $R_{ud}$ and $R_{uu}$
- The weights are adapted only after all $N$ samples have been presented: it is a **batch** strategy
- It assumes that the signal is stationary
If our data are non-stationary, we must adapt the filter as new samples are observed. To achieve this we can use the stochastic version of GD: SGD
In SGD:
- the weights are adapted after presenting a single sample or a small set of samples (mini-batch)
- there is no guarantee of reaching the optimum of a convex problem, but it is computationally more efficient than GD
The following diagram compares the trajectory of $w$ when using GD (black) and SGD (red). In general the SGD trajectory is noisier and may require more steps, but each step is much cheaper
<img src="../images/adaptive-sgd.png" width="600">
## The Least Mean Square (LMS) algorithm
We can extend the Wiener filter to the non-stationary case using SGD; the result is a simple and also robust algorithm: **the LMS algorithm**
- It was invented in 1960 by [Bernard Widrow](https://en.wikipedia.org/wiki/Bernard_Widrow) and Ted Hoff
- Unlike the Wiener filter, no statistical knowledge of the process is required, and there is no need to compute and invert the correlation matrix
- The LMS algorithm is fitted or trained recursively and online
Consider the **stochastic** cost function for the FIR architecture that we used for the Wiener filter
$$
\begin{align}
J^s_n(\textbf{w}) &= e_n^2 \nonumber \\
&= (d_n - y_n)^2 \nonumber \\
&= (d_n - \textbf{w}^T \textbf{u}_n )^2 \nonumber \\
&= (d_n - \sum_{k=0}^{L} w_{n, k} u_{n-k} )^2 \nonumber
\end{align}
$$
where we define $\textbf{u}_n = [u_n, u_{n-1}, \ldots, u_{n-L}]$.
:::{note}
Unlike the Wiener filter, we do not apply the expected value to the squared error; the instantaneous squared error is used
:::
Next we compute the gradient with respect to the weight $w_{n, k}$
$$
\frac{d J^s_n (\textbf{w})}{d w_{n, k}} = - 2 e_n u_{n-k}
$$
Then, using the SGD rule, we arrive at
$$
w_{n+1, k} = w_{n, k} + 2 \mu e_n u_{n-k}, \quad k=0, 1, \ldots, L
$$
which in matrix form is
$$
\begin{align}
\textbf{w}_{n+1} &= \textbf{w}_{n} + 2 \mu e_n \textbf{u}_{n}\nonumber \\
&= \textbf{w}_{n} + 2 \mu (d_n - \textbf{w}_{n}^T \textbf{u}_{n}) \textbf{u}_{n}, \nonumber
\end{align}
$$
which is known as the **Widrow-Hoff** rule
:::{important}
The LMS algorithm estimates the instantaneous error and updates the weights recursively
:::
The complexity of this algorithm is $L+1$ operations per update.
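For reference, a plain (non-normalized) LMS step is only a few lines; the sketch below is an added illustration of the Widrow-Hoff rule and is not the implementation used later in this lesson (that one is the normalized variant):
```
def lms_step(w, un, dn, mu):
    """One Widrow-Hoff update; un is the length L+1 vector [u[n], ..., u[n-L]], dn the desired output."""
    yn = np.dot(w, un)        # filter output
    en = dn - yn              # instantaneous error
    return w + 2 * mu * en * un, yn
```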
### Convergence of the LMS algorithm (Haykin 6.5)
The LMS algorithm converges in the mean to the optimal value
$$
\mathbb{E}[\textbf{w}_n] \to \textbf{w}^*
$$
for $n\to \infty$
It also converges in the mean square: the variance of $\textbf{w}_n - \textbf{w}^*$ tends to the minimum value of $J$ for $n\to \infty$
This holds if
$$
0 < \mu < \frac{2}{\text{Tr}[R_{uu}]}
$$
where $R_{uu} = \mathbb{E}[\textbf{u}_n \textbf{u}_n^T ]$ is the autocorrelation matrix and $\text{Tr}[]$ is the operator that computes the trace of a matrix
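In practice $R_{uu}$ is unknown, but its trace equals $(L+1)\,\mathbb{E}[u_n^2]$, so this bound can be estimated directly from the signal power. A small added sketch (using a stand-in white-noise signal, since the real $u$ is only defined further below):
```
# Rough upper bound on mu from the trace condition: Tr[Ruu] = (L+1) * E[u^2]
u_demo = np.random.randn(1000)   # stand-in signal, only for this illustration
L_demo = 20
mu_bound = 2.0 / ((L_demo + 1) * np.mean(u_demo**2))
print(mu_bound)
```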
### The Normalized LMS (NLMS) algorithm
We have the following iterative rule
$$
\begin{align}
\textbf{w}_{n+1} &= \textbf{w}_{n} + 2 \mu (d_n - \textbf{w}_{n}^T \textbf{u}_{n}) \textbf{u}_{n} \nonumber \\
& = \textbf{w}_{n} + \Delta \textbf{w}_n \nonumber
\end{align}
$$
which can be interpreted graphically as
<img src="../images/adaptive-lms-geometry.png" width="400">
(where $\textbf{x}(k)$ and $\textbf{w}(k)$ correspond to $\textbf{u}_n$ and $\textbf{w}_n$ in our notation, respectively)
:::{note}
The changes in the weight vector $\Delta \textbf{w}_n$ are parallel to $\textbf{u}_{n}$. Moreover, these changes can be dominated by $\max_k |u_{n-k}|$, the largest entry of
$$
\textbf{u}_{n} = [u_n, u_{n-1}, \ldots, u_{n-L}]
$$
:::
The **Normalized LMS** (NLMS) algorithm corrects this problem by normalizing by the energy (squared norm) of $\textbf{u}_{n}$
$$
\textbf{w}_{n+1} = \textbf{w}_{n} + 2 \mu (d_n - \textbf{w}_{n}^T \textbf{u}_{n}) \frac{\textbf{u}_{n}}{\left(\|\textbf{u}_{n}\|^2 + \delta\right)}
$$
where the constant $\delta$ is a small value used to avoid division by zero. In what follows we will use NLMS to review some applications
## Implementing the NLMS filter in Python
We can implement the NLMS filter equations as shown below
```
class Filtro_NLMS:
def __init__(self, L, mu, delta=1e-6, winit=None):
self.L = L
self.w = np.zeros(shape=(L+1, ))
self.mu = mu
self.delta = delta
def update(self, un, dn):
# Assuming that un = [u[n], u[n-1], ..., u[n-L]]
unorm = np.dot(un, un) + self.delta
yn = np.dot(self.w, un)
self.w += 2*self.mu*(dn - yn)*(un/unorm)
return yn
```
- The filter takes the order $L$ and the learning rate $\mu$ as inputs
- A zero vector is assumed for the initial weights, but in practice we could also start from a previous solution if one exists
- To update the weight vector we must provide the vector $\textbf{u}_n \in \mathbb{R}^{L+1}$ and the desired output $d_n \in \mathbb{R}$. The `update` function returns the output predicted by the filter, $y_n = w_n^T \textbf{u}_n$
Next we will try this filter on an application known as **Adaptive line enhancement** (ALE). ALE refers to an adaptive system for removing additive white noise from a signal. The system learns a bandpass filter around the frequency of interest
In ALE the desired signal is
$$
d_n = u_n = \textbf{u}_n[0]
$$
The value predicted by the filter will be the signal $u$ but free of white noise. This is because white noise is uncorrelated, and therefore the adaptive filter cannot predict it
```
# Say u = s + n
# The goal is to clean u and recover s
# s is a deterministic signal and n is white noise
Fs, f0 = 100, 5
t = np.arange(0, 4, 1/Fs)
s = np.sin(2.0*np.pi*t*f0)
n = 0.5*np.random.randn(len(t))
s[t>2.0] += 5 # Simulate an abrupt change in the mean of the signal
#s += s*(0.5 + 0.5*np.cos(2.0*np.pi*t/2)) # Tremolo (AM)
u = s + n
```
Unlike a static filter (such as the Wiener filter), it is possible to keep filtering even through abrupt changes in the signal.
Study how the result of the filter changes for different values of $\mu$
```
L = 20
u_preds = {}
for mu in np.logspace(-2, 0, num=10):
myfilter = Filtro_NLMS(L=L, mu=mu)
u_preds[mu] = np.zeros(shape=(len(u),))
for k in range(L+1, len(u)):
u_preds[mu][k] = myfilter.update(u[k-L-1:k][::-1], u[k])
hMap = hv.HoloMap(kdims='mu')
for mu, u_pred in u_preds.items():
s1 = hv.Curve((t, s), 'Tiempo', 'Señal', label='Limpia')
s2 = hv.Scatter((t, u), 'Tiempo', 'Señal', label='Contaminada')
s3 = hv.Curve((t, u_pred), 'Tiempo', 'Señal', label='Filtrada')
hMap[mu] = hv.Overlay([s1, s2, s3]).opts(hv.opts.Overlay(legend_position='top'),
hv.opts.Curve(ylim=(-5, 10), height=350))
hMap
```
:::{important}
The learning rate $\mu$ controls the speed of adaptation, but a rate that is too large makes the filter unstable. In general the optimal value of $\mu$ depends on the problem and on the value of $L$
:::
The following figure shows the frequency response of the filter as a function of time for $\mu=0.02$
Observe how, as it adapts, the filter concentrates on the fundamental frequency of the signal, which in this case is 5 Hz
```
L = 20
u_preds = {}
myfilter = Filtro_NLMS(L=L, mu=0.02)
H_history = np.zeros(shape=(512, len(u)))
for k in range(L+1, len(u)):
myfilter.update(u[k-L-1:k][::-1], u[k])
fk, Hk = scipy.signal.freqz(b=myfilter.w, a=1, fs=Fs)
H_history[:, k] = np.abs(Hk)
hv.Image((t, fk, H_history), kdims=['Tiempo [s]', 'Frecuencia [Hz]']).opts(cmap='Blues')
```
## Comparison between the Wiener filter/GD and the LMS algorithm/SGD
- **Assumptions**: Wiener requires a stationary environment, which lets us compute $R_{uu}$ and $R_{ud}$. In LMS the signal may be non-stationary.
- **Learning:** In the Wiener filter the learning is deterministic. In LMS the learning happens through **averaging** at the level of the estimates of $w$; the learning is statistical.
- **Optimality:** Wiener is optimal, whereas LMS is sub-optimal (locally optimal). LMS tends towards the Wiener solution.
- **Cost:** LMS is updated online and has cost $L$. Wiener is trained offline and has cost $L^2$.
The following diagram compares the Wiener filter and the LMS algorithm
<img src="../images/adaptive-lms.png">
|
github_jupyter
|
import holoviews as hv
hv.extension('bokeh')
hv.opts.defaults(hv.opts.Curve(width=500),
hv.opts.Points(width=500),
hv.opts.Image(width=500, colorbar=True, cmap='Viridis'))
import numpy as np
import scipy.signal
import scipy.linalg
J = lambda w : (w-1)**2 + 0.2*np.sin(2*np.pi*w) # Función de costo
gradJ = lambda w : 2*(w-1) + 0.2*2*np.pi*np.cos(2*np.pi*w) # Gradiente
mu = 0.05 # Tasa de aprendizaje
iteraciones = 15
wt = np.zeros(shape=(iteraciones, 3))
wt[0, :] = np.array([0.05, 0.4, 1.9]) # Solución inicial
w_plot = np.linspace(0, 2, num=100)
for k in range(1, iteraciones):
wt[k, :] = wt[k-1, :] - mu*gradJ(wt[k-1, :])
loss_surface = hv.Curve((w_plot, J(w_plot)), 'w', 'J')
hMap = hv.HoloMap(kdims='Iteración')
for k in range(iteraciones):
dots = hv.Points((wt[k, :], J(wt[k, :]))).opts(size=10, color='k')
mag = mu*gradJ(wt[k, :])
angle = np.pi/2 - np.sign(-mag)*np.pi/2
mag = np.abs(mag)
arrows = hv.VectorField((wt[k, :], J(wt[k, :]), angle, mag)).opts(pivot='tail',
magnitude=hv.dim('Magnitude'),
rescale_lengths=False)
hMap[k] = dots * arrows
loss_surface * hMap
J = lambda w : (w-1)**2 + 0.2*np.sin(2*np.pi*w) # Función de costo
gradJ = lambda w : 2*(w-1) + 0.2*2*np.pi*np.cos(2*np.pi*w) # Gradiente
mu = 0.5 # Tasa de aprendizaje
iteraciones = 15
wt = np.zeros(shape=(iteraciones, 3))
wt[0, :] = np.array([0.05, 0.4, 1.9]) # Solución inicial
w_plot = np.linspace(0, 2, num=100)
for k in range(1, iteraciones):
wt[k, :] = wt[k-1, :] - mu*gradJ(wt[k-1, :])
loss_surface = hv.Curve((w_plot, J(w_plot)), 'w', 'J')
hMap = hv.HoloMap(kdims='Iteración')
for k in range(iteraciones):
dots = hv.Points((wt[k, :], J(wt[k, :]))).opts(size=10, color='k')
mag = mu*gradJ(wt[k, :])
angle = np.pi/2 - np.sign(-mag)*np.pi/2
mag = np.abs(mag)
arrows = hv.VectorField((wt[k, :], J(wt[k, :]), angle, mag)).opts(pivot='tail',
magnitude=hv.dim('Magnitude'),
rescale_lengths=False)
hMap[k] = dots * arrows
loss_surface * hMap
class Filtro_NLMS:
def __init__(self, L, mu, delta=1e-6, winit=None):
self.L = L
self.w = np.zeros(shape=(L+1, ))
self.mu = mu
self.delta = delta
def update(self, un, dn):
# Asumiendo que un = [u[n], u[n-1], ..., u[n-L]]
unorm = np.dot(un, un) + self.delta
yn = np.dot(self.w, un)
self.w += 2*self.mu*(dn - yn)*(un/unorm)
return yn
# Digamos que u = s + n
# El objetivo es limpiar u para obtener s
# s es una señal determínista y n es ruido blanco
Fs, f0 = 100, 5
t = np.arange(0, 4, 1/Fs)
s = np.sin(2.0*np.pi*t*f0)
n = 0.5*np.random.randn(len(t))
s[t>2.0] += 5 # Simulemos un cambio abrupto en la media de la señal
#s += s*(0.5 + 0.5*np.cos(2.0*np.pi*t/2)) # Tremolo (AM)
u = s + n
L = 20
u_preds = {}
for mu in np.logspace(-2, 0, num=10):
myfilter = Filtro_NLMS(L=L, mu=mu)
u_preds[mu] = np.zeros(shape=(len(u),))
for k in range(L+1, len(u)):
u_preds[mu][k] = myfilter.update(u[k-L-1:k][::-1], u[k])
hMap = hv.HoloMap(kdims='mu')
for mu, u_pred in u_preds.items():
s1 = hv.Curve((t, s), 'Tiempo', 'Señal', label='Limpia')
s2 = hv.Scatter((t, u), 'Tiempo', 'Señal', label='Contaminada')
s3 = hv.Curve((t, u_pred), 'Tiempo', 'Señal', label='Filtrada')
hMap[mu] = hv.Overlay([s1, s2, s3]).opts(hv.opts.Overlay(legend_position='top'),
hv.opts.Curve(ylim=(-5, 10), height=350))
hMap
L = 20
u_preds = {}
myfilter = Filtro_NLMS(L=L, mu=0.02)
H_history = np.zeros(shape=(512, len(u)))
for k in range(L+1, len(u)):
myfilter.update(u[k-L-1:k][::-1], u[k])
fk, Hk = scipy.signal.freqz(b=myfilter.w, a=1, fs=Fs)
H_history[:, k] = np.abs(Hk)
hv.Image((t, fk, H_history), kdims=['Tiempo [s]', 'Frecuencia [Hz]']).opts(cmap='Blues')
| 0.402275 | 0.873539 |
<a href="https://csdms.colorado.edu"><img style="float: center; width: 75%" src="../../media/logo.png"></a>
# Programming with Python
## Creating Functions
### minutes: 30
> ## Learning Objectives
>
> * Define a function that takes parameters.
> * Return a value from a function.
> * Test and debug a function.
> * Set default values for function parameters.
> * Explain why we should divide programs into small, single-purpose functions.
At this point,
we've written code to draw some interesting features in our topographical data,
loop over all our data files to quickly draw these plots for each of them,
and have Python make decisions based on what it sees in our data.
But, our code is getting pretty long and complicated;
what if we had thousands of datasets,
and didn't want to generate a figure for every single one?
Commenting out the figure-drawing code is a nuisance.
Also, what if we want to use that code again,
on a different dataset or at a different point in our program?
Cutting and pasting it is going to make our code get very long and very repetitive,
very quickly.
We'd like a way to package our code so that it is easier to reuse,
and Python provides for this by letting us define things called 'functions' -
a shorthand way of re-executing longer pieces of code.
Import required packages
```
import numpy
import matplotlib.pyplot
```
Let's start by defining a function `fahr_to_cel` that converts temperatures from Fahrenheit to Celsius. To convert a temperature in degrees Fahrenheit to Celsius, subtract 32 and multiply by 5/9 (about 0.5556).
```
def fahr_to_cel(temp):
temp_new = (temp-32)*5/9
return temp_new
```
The function definition opens with the keyword `def`,
which is followed by the name of the function
and a parenthesized list of parameter names.
The [body](reference.html#function-body) of the function --- the
statements that are executed when it runs --- is indented below the definition line.
When we call the function,
the values we pass to it are assigned to those variables
so that we can use them inside the function.
Inside the function,
we use a [return statement](reference.html#return-statement) to send a result back to whoever asked for it.
Let's try running our function.
Calling our own function is no different from calling any other function:
```
fahr_to_cel(100)
```
We've successfully called the function that we defined,
and we have access to the value that we returned.
## Composing Functions
Now that we've seen how to turn Fahrenheit into Celsius,
it's easy to turn Celsius into Kelvin (+ 273.15):
```
def cel_2_kel(temp):
return temp+273.15
```
What about converting Fahrenheit to Kelvin?
We could write out the formula, but we don't need to.
Instead, we can [compose](reference.html#compose) the two functions we have already created:
```
def fahr_to_kel(temp):
return(cel_2_kel(fahr_to_cel(temp)))
print('freezing point of water in Kelvin:', fahr_to_kel(32.0))
```
This is our first taste of how larger programs are built:
we define basic operations,
then combine them in ever-larger chunks to get the effect we want.
Real-life functions will usually be larger than the ones shown here --- typically half a dozen to a few dozen lines --- but
they shouldn't ever be much longer than that,
or the next person who reads it won't be able to understand what's going on.
## Tidying up
Now that we know how to wrap bits of code up in functions,
we can make our topo analysis easier to read and easier to reuse.
First, let's make an `analyze` function that generates our plots:
```
def analyze(filename):
data = numpy.loadtxt(fname=filename, delimiter=',')
fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
axes1 = fig.add_subplot(1, 3, 1)
axes2 = fig.add_subplot(1, 3, 2)
axes3 = fig.add_subplot(1, 3, 3)
axes1.set_ylabel('average')
axes1.plot(data.mean(axis=0))
axes2.set_ylabel('max')
axes2.plot(data.max(axis=0))
axes3.set_ylabel('min')
axes3.plot(data.min(axis=0))
fig.tight_layout()
matplotlib.pyplot.show()
```
Try it:
```
filename ='../../data/topo.asc'
analyze(filename)
```
## Testing and Documenting
Once we start putting things in functions so that we can re-use them,
we need to start testing that those functions are working correctly.
To see how to do this, let's write a function to center a dataset around its mean (in other words, the mean of the centered data becomes 0, values smaller than the original mean become negative, and values greater than it become positive):
```
def center(data):
new_data= data-data.mean()
return new_data
```
We could test this on our actual data,
but since we don't know what the values ought to be,
it will be hard to tell if the result was correct.
Instead, let's use NumPy to create a matrix of 1's
and then center that around its mean:
```
z = numpy.ones((5,5))
print(z)
print(center(z))
```
Now, let's write a function to center a dataset around any particular value:
```
def center(data, desired):
return (data - data.mean()) + desired
```
Test
```
z = numpy.zeros((2,2))
print(z)
print(center(z, 3))
```
That looks right,
so let's try `center` on our real data:
```
filename ='../../data/topo.asc'
data = numpy.loadtxt(fname=filename, delimiter=',')
print(center(data, 0))
import matplotlib.pyplot as plt
plt.imshow(center(data, -200))
plt.colorbar()
```
It's hard to tell from the default output whether the result is correct,
but there are a few simple tests that will reassure us:
```
print('original min, mean, and max are:', data.min(), data.mean(), data.max())
centered = center(data, 0)
print('min, mean, and max of centered data are:', centered.min(), centered.mean(), centered.max())
```
That seems almost right:
the original mean was about 3153.6,
so the lower bound of the centered data is now about 2565.0 - 3153.6, i.e. roughly -588.6.
The mean of the centered data isn't quite zero --- we'll explore why not in the challenges --- but it's pretty close.
We can even go further and check that the standard deviation hasn't changed:
```
print('std dev before and after:', data.std(), centered.std())
```
Those values look the same,
but we probably wouldn't notice if they were different in the sixth decimal place.
Let's do this instead:
```
print('difference in standard deviations before and after:', data.std() - centered.std())
```
Again,
the difference is very small.
It's still possible that our function is wrong,
but it seems unlikely enough that we should probably get back to doing our analysis.
We have one more task first, though:
we should write some [documentation](reference.html#documentation) for our function
to remind ourselves later what it's for and how to use it.
The usual way to put documentation in software is to add [comments](reference.html#comment) like this:
```
# center(data, desired): return a new array containing the original data centered around the desired value.
def center(data, desired):
return (data - data.mean()) + desired
```
There's a better way, though.
If the first thing in a function is a string that isn't assigned to a variable,
that string is attached to the function as its documentation:
```
def center(data, desired):
'''Return a new array containing the original data centered around the desired value.'''
return (data - data.mean()) + desired
```
This is better because we can now ask Python's built-in help system to show us the documentation for the function:
```
help(center)
?center
```
A string like this is called a [docstring](reference.html#docstring).
We don't need to use triple quotes when we write one,
but if we do,
we can break the string across multiple lines:
```
def center(data, desired):
'''Return a new array containing the original data centered around the desired value.
Example: center([1, 2, 3], 0) => [-1, 0, 1]'''
return (data - data.mean()) + desired
help(center)
```
## Defining Defaults
We have passed parameters to functions in two ways:
directly, as in `type(data)`,
and by name, as in `numpy.loadtxt(fname='something.csv', delimiter=',')`.
In fact,
we can pass the filename to `loadtxt` without the `fname=`:
```
numpy.loadtxt('../../data/topo.asc', delimiter=',')
```
but we still need to say `delimiter=`:
What happens if you enter the following statement?
~~~ {.python}
numpy.loadtxt('data/topo.asc', ',')
~~~
To understand what's going on,
and make our own functions easier to use,
let's re-define our `center` function like this:
```
def center(data, desired=0.0):
'''Return a new array containing the original data centered around the desired value (0 by default).
Example: center([1, 2, 3], 0) => [-1, 0, 1]'''
return (data - data.mean()) + desired
```
The key change is that the second parameter is now written `desired=0.0` instead of just `desired`.
If we call the function with two arguments,
it works as it did before:
```
test_data = numpy.zeros((2, 2))
print(center(test_data, 3))
```
But we can also now call it with just one parameter,
in which case `desired` is automatically assigned the [default value](reference.html#default-value) of 0.0:
```
more_data = 5 + numpy.zeros((2, 2))
print('data before centering:')
print(more_data)
print('centered data:')
print(center(more_data))
```
This is handy:
if we usually want a function to work one way,
but occasionally need it to do something else,
we can allow people to pass a parameter when they need to
but provide a default to make the normal case easier.
The example below shows how Python matches values to parameters:
```
def display(a=1, b=2, c=3):
print('a:', a, 'b:', b, 'c:', c)
print('no parameters:')
display()
print('one parameter:')
display(55)
print('two parameters:')
display(55, 66)
```
As this example shows,
parameters are matched up from left to right,
and any that haven't been given a value explicitly get their default value.
We can override this behavior by naming the value as we pass it in:
```
print('only setting the value of c')
display(c=77)
```
With that in hand,
let's look at the help for `numpy.loadtxt`:
```
help(numpy.loadtxt)
```
There's a lot of information here,
but the most important part is the first couple of lines:
~~~ {.output}
loadtxt(fname, dtype=<type 'float'>, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None,
unpack=False, ndmin=0)
~~~
This tells us that `loadtxt` has one parameter called `fname` that doesn't have a default value,
and eight others that do.
If we call the function like this (try):
~~~ {.python}
numpy.loadtxt('data/topo.asc', ',')
~~~
then the filename is assigned to `fname` (which is what we want),
but the delimiter string `','` is assigned to `dtype` rather than `delimiter`,
because `dtype` is the second parameter in the list. However, ',' isn't a known `dtype`, so
our code produces an error message when we try to run it.
When we call `loadtxt` we don't have to provide `fname=` for the filename because it's the
first item in the list, but if we want the ',' to be assigned to `delimiter`,
we *do* have to write `delimiter=`,
since `delimiter` is not the second parameter in the list.
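For example, both of the following calls work, because once the arguments are named their order no longer matters:
~~~ {.python}
numpy.loadtxt(fname='../../data/topo.asc', delimiter=',')
numpy.loadtxt(delimiter=',', fname='../../data/topo.asc')
~~~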
## Test your skills 01: Combining strings
"Adding" two strings produces their concatenation:
`'a' + 'b'` is `'ab'`.
Write a function called `fence` that takes two parameters called `original` and `wrapper`
and returns a new string that has the wrapper character at the beginning and end of the original.
A call to your function should look like this:
~~~ {.python}
print(fence('name', '*'))
~~~
~~~ {.output}
*name*
~~~
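One possible solution sketch (try writing your own version first):
~~~ {.python}
def fence(original, wrapper):
    '''Return original with wrapper added at the start and end.'''
    return wrapper + original + wrapper

print(fence('name', '*'))
~~~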
## Test your skills 02: Selecting characters from strings
If the variable `s` refers to a string,
then `s[0]` is the string's first character
and `s[-1]` is its last.
Write a function called `outer`
that returns a string made up of just the first and last characters of its input.
A call to your function should look like this:
~~~ {.python}
print(outer('helium'))
~~~
~~~ {.output}
hm
~~~
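One possible solution sketch:
~~~ {.python}
def outer(s):
    '''Return a string made of the first and last characters of s.'''
    return s[0] + s[-1]

print(outer('helium'))
~~~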
## Test your skills 03 : Rescaling an array
Write a function `rescale` that takes an array as input
and returns a corresponding array of values scaled to lie in the range 0.0 to 1.0.
(Hint: If $L$ and $H$ are the lowest and highest values in the original array,
then the replacement for a value $v$ should be $(v-L) / (H-L)$.)
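One possible solution sketch, assuming the input is a NumPy array:
~~~ {.python}
def rescale(input_array):
    '''Return input_array scaled to lie in the range 0.0 to 1.0.'''
    L = input_array.min()
    H = input_array.max()
    return (input_array - L) / (H - L)
~~~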
## Test your skills 04: Testing and documenting your function
Run the commands `help(numpy.arange)` and `help(numpy.linspace)`
to see how to use these functions to generate regularly-spaced values,
then use those values to test your `rescale` function.
Once you've successfully tested your function,
add a docstring that explains what it does.
## Test your skills 05: Defining defaults
Rewrite the `rescale` function so that it scales data to lie between 0.0 and 1.0 by default,
but will allow the caller to specify lower and upper bounds if they want.
Compare your implementation to your neighbor's:
do the two functions always behave the same way?
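One possible sketch that keeps the default 0.0 to 1.0 range but lets the caller choose the bounds:
~~~ {.python}
def rescale(input_array, low_val=0.0, high_val=1.0):
    '''Return input_array scaled to lie between low_val and high_val
    (0.0 and 1.0 by default).'''
    L = input_array.min()
    H = input_array.max()
    scaled = (input_array - L) / (H - L)
    return scaled * (high_val - low_val) + low_val
~~~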
## Test your skills 06: Variables inside and outside functions
What does the following piece of code display when run - and why?
~~~ {.python}
f = 0
k = 0
def f2k(f):
k = ((f-32)*(5.0/9.0)) + 273.15
return k
f2k(8)
f2k(41)
f2k(32)
print(k)
~~~
|
github_jupyter
|
import numpy
import matplotlib.pyplot
def fahr_to_cel(temp):
temp_new = (temp-32)*5/9
return temp_new
fahr_to_cel(100)
def cel_2_kel(temp):
return temp+273.15
def fahr_to_kel(temp):
return(cel_2_kel(fahr_to_cel(temp)))
print('freezing point of water in Kelvin:', fahr_to_kel(32.0))
def analyze(filename):
data = numpy.loadtxt(fname=filename, delimiter=',')
fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
axes1 = fig.add_subplot(1, 3, 1)
axes2 = fig.add_subplot(1, 3, 2)
axes3 = fig.add_subplot(1, 3, 3)
axes1.set_ylabel('average')
axes1.plot(data.mean(axis=0))
axes2.set_ylabel('max')
axes2.plot(data.max(axis=0))
axes3.set_ylabel('min')
axes3.plot(data.min(axis=0))
fig.tight_layout()
matplotlib.pyplot.show()
filename ='../../data/topo.asc'
analyze(filename)
def center(data):
new_data= data-data.mean()
return new_data
z = numpy.ones((5,5))
print(z)
print(center(z))
def center(data, desired):
return (data - data.mean()) + desired
z = numpy.zeros((2,2))
print(z)
print(center(z, 3))
filename ='../../data/topo.asc'
data = numpy.loadtxt(fname=filename, delimiter=',')
print(center(data, 0))
import matplotlib.pyplot as plt
plt.imshow(center(data, -200))
plt.colorbar()
print('original min, mean, and max are:', data.min(), data.mean(), data.max())
centered = center(data, 0)
print('min, mean, and max of centered data are:', centered.min(), centered.mean(), centered.max())
print('std dev before and after:', data.std(), centered.std())
print('difference in standard deviations before and after:', data.std() - centered.std())
# center(data, desired): return a new array containing the original data centered around the desired value.
def center(data, desired):
return (data - data.mean()) + desired
def center(data, desired):
'''Return a new array containing the original data centered around the desired value.'''
return (data - data.mean()) + desired
help(center)
?center
def center(data, desired):
'''Return a new array containing the original data centered around the desired value.
Example: center([1, 2, 3], 0) => [-1, 0, 1]'''
return (data - data.mean()) + desired
help(center)
numpy.loadtxt('../../data/topo.asc', delimiter=',')
def center(data, desired=0.0):
'''Return a new array containing the original data centered around the desired value (0 by default).
Example: center([1, 2, 3], 0) => [-1, 0, 1]'''
return (data - data.mean()) + desired
test_data = numpy.zeros((2, 2))
print(center(test_data, 3))
more_data = 5 + numpy.zeros((2, 2))
print('data before centering:')
print(more_data)
print('centered data:')
print(center(more_data))
def display(a=1, b=2, c=3):
print('a:', a, 'b:', b, 'c:', c)
print('no parameters:')
display()
print('one parameter:')
display(55)
print('two parameters:')
display(55, 66)
print('only setting the value of c')
display(c=77)
help(numpy.loadtxt)
| 0.421314 | 0.990487 |
# Individual Project
## Vending Machine
**How to use this notebook and complete the project**
1. Answer all required questions in the markdown cells.
2. Read the instructions and fill in the #TODO sections.
3. Run all the cells to store the results of each code cell.
4. Save this notebook by clicking on `File`, then click `Download as`, choose `HTML`.
5. Push this notebook to your GitHub repository by creating a new repository named `AIP_IndividualProject`. Set this repository to `Private` until the day of your submission, which is on *29th June 2019, 1.30 PM*.
**Introduction**
Japan is a country full of vending machines. Everyone uses a vending machine at least once a week to quickly buy a drink and quench their thirst. Imagine you are going to operate a vending machine. There are several drinks that a user can choose to buy from it.
**Question 1**
Does your country have vending machines everywhere? If it does, do people usually use them? What kinds of items are sold in a vending machine?
**Answer**:
**Question 2**
Imagine you are going to program a vending machine that sells 5 types of drinks, which are:
1. Orange Juice - 120 Yen
2. Mineral Water - 100 Yen
3. Coke - 120 Yen
4. Tea - 120 Yen
5. Green Tea - 110 Yen
Create a flowchart that describes how a customer buys a drink from this vending machine.
You may draw the flowchart using *PowerPoint*, then save it as a picture in `.jpg` or `.png`. Then, use this command in the *Answer* cell below.
`<img src=_____.jpg>`
**Answer**:
<img src="Flowchart_vm.png" height="640" width="480">
**Step 1**
#TODO
Initialize the drinks sold and get an input from the user to choose a drink
```
#TODO Build a dictionary that contains the drinks and their prices in Japanese yen. The drink names should be strings and the prices should be integers.
drinks = {"Orange Juice": 120, "Mineral Water": 100, "Coke": 120, "Tea": 120, "Green Tea": 110}
choice = input("Choose your drinks (A for Orange Juice, B for Mineral Water, C for Coke, D for Tea, E for Green Tea): ")
```
**Step 2**
#TODO
Build a function `input_money` that prints how much more money is needed, returns any change due, and gives the selected drink to the customer.
```
def input_money(value):
money = input("Please insert money: ")
value = int(value)
money = int(money)
if money < value:
need_money = value - money
print("You need {} yen more".format(need_money))
money_put = input("Put more money: ")
money_put = int(money_put)
if money_put == need_money:
print ("Here is your drink")
elif money > value:
balance = money - value
print("Your balance is {} yen and here is your drink".format(balance))
elif money == value:
print ("Here is your drink")
```
**Step 3**
#TODO
Write an if/elif statement to get the price of the chosen drink.
```
if choice == "A":
chosen_drink = drinks.get("Orange Juice")
elif choice == "B":
chosen_drink = drinks.get("Mineral Water")
elif choice == "C":
chosen_drink = drinks.get("Coke")
elif choice == "D":
chosen_drink = drinks.get("Tea")
else:
chosen_drink = drinks.get("Green Tea")
input_money(chosen_drink)
```
**Bonus Question**:
Extend the program with another condition that lets the customer choose between hot and cold tea or green tea, with the same or a different price.
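One possible sketch for this bonus (illustrative only; it assumes the `drinks` dictionary and the `input_money` function from the earlier steps, and it assumes hot drinks cost 10 yen more, which is an arbitrary choice):
```
# Bonus sketch: hot/cold option for Tea and Green Tea (hot assumed to cost +10 yen)
choice = input("Choose your drink (D for Tea, E for Green Tea): ")
name = "Tea" if choice == "D" else "Green Tea"
temperature = input("Hot or cold? (H/C): ")
price = drinks[name] + (10 if temperature == "H" else 0)
print("You chose {} {} ({} yen)".format("hot" if temperature == "H" else "cold", name, price))
input_money(price)
```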
**Question 3**:
What else can you add to the vending machine program? How would you further improve it?
**Answer**:
|
github_jupyter
|
#TODO Build a dictionary that contains the drinks and their prices in Japanese yen. The drink names should be strings and the prices should be integers.
drinks = {"Orange Juice": 120, "Mineral Water": 100, "Coke": 120, "Tea": 120, "Green Tea": 110}
choice = input("Choose your drinks (A for Orange Juice, B for Mineral Water, C for Coke, D for Tea, E for Green Tea): ")
def input_money(value):
money = input("Please insert money: ")
value = int(value)
money = int(money)
if money < value:
need_money = value - money
print("You need {} yen more".format(need_money))
money_put = input("Put more money: ")
money_put = int(money_put)
if money_put == need_money:
print ("Here is your drink")
elif money > value:
balance = money - value
print("Your balance is {} yen and here is your drink".format(balance))
elif money == value:
print ("Here is your drink")
if choice == "A":
chosen_drink = drinks.get("Orange Juice")
elif choice == "B":
chosen_drink = drinks.get("Mineral Water")
elif choice == "C":
chosen_drink = drinks.get("Coke")
elif choice == "D":
chosen_drink = drinks.get("Tea")
else:
chosen_drink = drinks.get("Green Tea")
input_money(chosen_drink)
| 0.065064 | 0.961498 |
```
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
    # Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
    # Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
#init = tf.initialize_all_variables()
init = tf.global_variables_initializer()
# Launch the graph
# Using InteractiveSession (more convenient while using Notebooks)
sess = tf.InteractiveSession()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
f.show()
plt.show()
#plt.draw()
#plt.waitforbuttonpress()
```
|
github_jupyter
|
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
    # Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
    # Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
#init = tf.initialize_all_variables()
init = tf.global_variables_initializer()
# Launch the graph
# Using InteractiveSession (more convenient while using Notebooks)
sess = tf.InteractiveSession()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
f.show()
plt.show()
#plt.draw()
#plt.waitforbuttonpress()
| 0.755637 | 0.725649 |
```
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore',category=DeprecationWarning)
import zipfile
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# Unzip the downloaded file
def unzip_data(filename):
"""
Utility function to unzip a zipped file.
"""
zip_ref = zipfile.ZipFile(filename, "r")
zip_ref.extractall()
zip_ref.close()
import cv2
import numpy as np
from matplotlib import pyplot as plt
def load_image(image):
image_bgr = cv2.imread(image, cv2.IMREAD_COLOR)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb), plt.axis("off")
plt.show()
# Setup data inputs
IMG_SIZE = (224, 224)
def create_data_loaders(train_dir, test_dir, image_size=IMG_SIZE):
"""
Creates a training and test image BatchDataset from train_dir and test_dir.
"""
train_data = tf.keras.preprocessing.image_dataset_from_directory(train_dir,
label_mode="categorical",
image_size=image_size)
# Note: the test data is the same as the previous experiment, we could
# skip creating this, but we'll leave this here to practice.
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir,
label_mode="categorical",
image_size=image_size)
return train_data, test_data
# Create a data augmentation stage with horizontal flipping, rotations, zooms
data_augmentation = keras.Sequential([
preprocessing.RandomFlip("horizontal"),
preprocessing.RandomRotation(0.2),
preprocessing.RandomZoom(0.2),
preprocessing.RandomHeight(0.2),
preprocessing.RandomWidth(0.2),
# preprocessing.Rescaling(1./255) # keep for ResNet50V2, remove for EfficientNetB0
], name ="data_augmentation")
# Setup input shape and base model, freezing the base model layers
INPUT_SHAPE = (224, 224, 3)
BASE_MODEL = tf.keras.applications.EfficientNetB0(include_top=False)
def create_model(input_shape=INPUT_SHAPE, base_model=BASE_MODEL, num_classes=10):
# Fine-tune?
base_model.trainable = False
# Create input layer
inputs = layers.Input(shape=input_shape, name="input_layer")
# Add in data augmentation Sequential model as a layer
x = data_augmentation(inputs)
# Give base_model inputs (after augmentation) and don't train it
x = base_model(x, training=False)
# Pool output features of base model
x = layers.GlobalAveragePooling2D(name="global_average_pooling_layer")(x)
# Put a dense layer on as the output
outputs = layers.Dense(num_classes, activation="softmax", name="output_layer")(x)
# Make a model with inputs and outputs
model = keras.Model(inputs, outputs)
# Compile the model
model.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
return model
# Create a function to import an image and resize it to be able to be used with our model
def load_and_prep_image(filename, img_shape=224, scale=False):
"""
Reads in an image from filename, turns it into a tensor and reshapes into
(224, 224, 3).
"""
# Read in the image
img = tf.io.read_file(filename)
# Decode it into a tensor
img = tf.image.decode_jpeg(img)
# Resize the image
img = tf.image.resize(img, [img_shape, img_shape])
# Rescale the image (get all values between 0 and 1)
if scale:
return img/255.
else:
return img
!pip install wget
# Get data
import zipfile
import wget
# Download data (10 class subset of Food101 - https://www.kaggle.com/dansbecker/food-101)
# Already formatted in standard image classification directory style
# !wget https://storage.googleapis.com/ztm_tf_course/food_vision/10_food_classes_all_data.zip
unzip_data("10_food_classes_all_data.zip")
# How many images in each folder?
import os
# Walk through 10 percent data directory and list number of files
for dirpath, dirnames, filenames in os.walk("10_food_classes_all_data"):
print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.")
```
## Model 1 (10 classes)
```
# Create BatchDataset
train_data, test_data = create_data_loaders(train_dir="10_food_classes_all_data/train/",
test_dir="10_food_classes_all_data/test/")
# What size is our data?
train_data
# Create tensorboard callback (functionized because need to create a new one for each model)
import datetime
def create_tensorboard_callback(dir_name, experiment_name):
log_dir = dir_name + "/" + experiment_name + "/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir
)
print(f"Saving TensorBoard log files to: {log_dir}")
return tensorboard_callback
# Create model
model_1 = create_model(num_classes=len(train_data.class_names))
# Fit the model
history_1_percent = model_1.fit(train_data,
epochs=5,
steps_per_epoch=len(train_data),
validation_data=test_data,
validation_steps=int(0.25 * len(test_data)), # validate for less steps
# Track model training logs
callbacks=[create_tensorboard_callback("transfer_learning", "all_data_aug")])
# Classes our model is trained on
class_names = train_data.class_names
class_names
image = "test_sushi.jpg"
load_image(image)
# Preprocess image
test_img = load_and_prep_image(image)
# Make predictions
test_img_expanded = tf.expand_dims(test_img, axis=0) # expand image dimensions (224, 224, 3) -> (1, 224, 224, 3)
pred = model_1.predict(test_img_expanded)
# Check the predicted class
food_class = class_names[tf.argmax(pred[0])]
food_class
def upload_checker(food):
    # Upload the image only if the predicted class is one of the known food classes
    if food in class_names:
        print("Image will be uploaded")
    else:
        print("Uploaded image is not a food and will not be uploaded")
upload_checker(food_class)
# model_1.save('10_class_food_classification_model')
new_model = tf.keras.models.load_model('10_class_food_classification_model')
# Preprocess image
test_img = load_and_prep_image("test_image.jfif")
load_image("test_image.jfif")
# Make predictions
test_img_expanded = tf.expand_dims(test_img, axis=0) # expand image dimensions (224, 224, 3) -> (1, 224, 224, 3)
pred = new_model.predict(test_img_expanded)
# Check the predicted class
class_names[tf.argmax(pred[0])]
```
|
github_jupyter
|
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore',category=DeprecationWarning)
import zipfile
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# Unzip the downloaded file
def unzip_data(filename):
"""
Utility function to unzip a zipped file.
"""
zip_ref = zipfile.ZipFile(filename, "r")
zip_ref.extractall()
zip_ref.close()
import cv2
import numpy as np
from matplotlib import pyplot as plt
def load_image(image):
image_bgr = cv2.imread(image, cv2.IMREAD_COLOR)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb), plt.axis("off")
plt.show()
# Setup data inputs
IMG_SIZE = (224, 224)
def create_data_loaders(train_dir, test_dir, image_size=IMG_SIZE):
"""
Creates a training and test image BatchDataset from train_dir and test_dir.
"""
train_data = tf.keras.preprocessing.image_dataset_from_directory(train_dir,
label_mode="categorical",
image_size=image_size)
# Note: the test data is the same as the previous experiment, we could
# skip creating this, but we'll leave this here to practice.
test_data = tf.keras.preprocessing.image_dataset_from_directory(test_dir,
label_mode="categorical",
image_size=image_size)
return train_data, test_data
# Create a data augmentation stage with horizontal flipping, rotations, zooms
data_augmentation = keras.Sequential([
preprocessing.RandomFlip("horizontal"),
preprocessing.RandomRotation(0.2),
preprocessing.RandomZoom(0.2),
preprocessing.RandomHeight(0.2),
preprocessing.RandomWidth(0.2),
# preprocessing.Rescaling(1./255) # keep for ResNet50V2, remove for EfficientNetB0
], name ="data_augmentation")
# Setup input shape and base model, freezing the base model layers
INPUT_SHAPE = (224, 224, 3)
BASE_MODEL = tf.keras.applications.EfficientNetB0(include_top=False)
def create_model(input_shape=INPUT_SHAPE, base_model=BASE_MODEL, num_classes=10):
# Fine-tune?
base_model.trainable = False
# Create input layer
inputs = layers.Input(shape=input_shape, name="input_layer")
# Add in data augmentation Sequential model as a layer
x = data_augmentation(inputs)
# Give base_model inputs (after augmentation) and don't train it
x = base_model(x, training=False)
# Pool output features of base model
x = layers.GlobalAveragePooling2D(name="global_average_pooling_layer")(x)
# Put a dense layer on as the output
outputs = layers.Dense(num_classes, activation="softmax", name="output_layer")(x)
# Make a model with inputs and outputs
model = keras.Model(inputs, outputs)
# Compile the model
model.compile(loss="categorical_crossentropy",
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
return model
# Create a function to import an image and resize it to be able to be used with our model
def load_and_prep_image(filename, img_shape=224, scale=False):
"""
Reads in an image from filename, turns it into a tensor and reshapes into
(224, 224, 3).
"""
# Read in the image
img = tf.io.read_file(filename)
# Decode it into a tensor
img = tf.image.decode_jpeg(img)
# Resize the image
img = tf.image.resize(img, [img_shape, img_shape])
# Rescale the image (get all values between 0 and 1)
if scale:
return img/255.
else:
return img
!pip install wget
# Get data
import zipfile
import wget
# Download data (10 class subset of Food101 - https://www.kaggle.com/dansbecker/food-101)
# Already formatted in standard image classification directory style
# !wget https://storage.googleapis.com/ztm_tf_course/food_vision/10_food_classes_all_data.zip
unzip_data("10_food_classes_all_data.zip")
# How many images in each folder?
import os
# Walk through 10 percent data directory and list number of files
for dirpath, dirnames, filenames in os.walk("10_food_classes_all_data"):
print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.")
# Create BatchDataset
train_data, test_data = create_data_loaders(train_dir="10_food_classes_all_data/train/",
test_dir="10_food_classes_all_data/test/")
# What size is our data?
train_data
# Create tensorboard callback (functionized because need to create a new one for each model)
import datetime
def create_tensorboard_callback(dir_name, experiment_name):
log_dir = dir_name + "/" + experiment_name + "/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir
)
print(f"Saving TensorBoard log files to: {log_dir}")
return tensorboard_callback
# Create model
model_1 = create_model(num_classes=len(train_data.class_names))
# Fit the model
history_1_percent = model_1.fit(train_data,
epochs=5,
steps_per_epoch=len(train_data),
validation_data=test_data,
validation_steps=int(0.25 * len(test_data)), # validate for less steps
# Track model training logs
callbacks=[create_tensorboard_callback("transfer_learning", "all_data_aug")])
# Classes our model is trained on
class_names = train_data.class_names
class_names
image = "test_sushi.jpg"
load_image(image)
# Preprocess image
test_img = load_and_prep_image(image)
# Make predictions
test_img_expanded = tf.expand_dims(test_img, axis=0) # expand image dimensions (224, 224, 3) -> (1, 224, 224, 3)
pred = model_1.predict(test_img_expanded)
# Check the predicted class
food_class = class_names[tf.argmax(pred[0])]
food_class
def upload_checker(food):
    # Upload the image only if the predicted class is one of the known food classes
    if food in class_names:
        print("Image will be uploaded")
    else:
        print("Uploaded image is not a food and will not be uploaded")
upload_checker(food_class)
# model_1.save('10_class_food_classification_model')
new_model = tf.keras.models.load_model('10_class_food_classification_model')
# Preprocess image
test_img = load_and_prep_image("test_image.jfif")
load_image("test_image.jfif")
# Make predictions
test_img_expanded = tf.expand_dims(test_img, axis=0) # expand image dimensions (224, 224, 3) -> (1, 224, 224, 3)
pred = new_model.predict(test_img_expanded)
# Check the predicted class
class_names[tf.argmax(pred[0])]
| 0.741861 | 0.775477 |
# Feature Engineering
## Seth Adams
### Merged train and test files. Found repeat images.
I found 67 repeated images after merging the train and test sets. It appears these images may be labeled correctly in one class but incorrectly in another. These repeats will be stored in a pickle for reference. See an example case in the image below.
```
import pickle
with open('class_map.pickle', 'rb') as handle:
class_map = pickle.load(handle)
print(class_map.items())
with open('repeats.pickle', 'rb') as handle:
pairs = pickle.load(handle)
print('{} repeats found'.format(len(pairs.keys())))
for k, v in pairs.items():
print('Repeated image found: {} in classes: {}'.format(k, v))
```

### Demo of Segmentation class
Image workflow:
Segment plankton --> Crop to segmentation --> Resize with major axis and aspect ratio --> Pad zeros to target size
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
from segmentation import Segmentation
data_path = os.path.join(os.getcwd(), 'data')
class_path = os.path.join(data_path, 'Doliolida')
im_path = os.path.join(class_path, 'SPC-PWSSC-1468186366-000556-041-352-1182-520-1216.png')
im_bgr = cv2.imread(im_path, -1)
im_rgb = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(im_rgb)
plt.show()
```
Image cleaning for segmentation is performed as follows (a rough sketch of these steps is shown after the lists below):
- Image is converted to grayscale
- Pixels above 1 are thresholded up to 255 (white)
- A gaussian blur is applied using a (5, 5) kernel size
- An ellipse structuring element is used with (3, 3) kernel to perform closing and opening on the image
- Contours are detected. Only keeping the largest contour found
The largest contour is used to produce the following:
- 2D mask of the image (mask)
- An overlay mask where the 2D mask is made transparent over the original image (overlay)
- A segmented version of the image with drawings of features and contour (segmented)
- A padded version of the image which centers a crop from contour and pads to a specified shape (padded)
Images are returned in RGB format (needed for matplotlib.imshow)
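The `Segmentation` class itself lives in `segmentation.py` and is not shown in this notebook. The snippet below is only a rough sketch of the cleaning and contour steps listed above, written directly with OpenCV; the function and variable names are illustrative and are not the actual class API.
```
import cv2
import numpy as np

def segment_sketch(image_path):
    """Illustrative sketch of the cleaning steps above, not the real Segmentation class."""
    gray = cv2.cvtColor(cv2.imread(image_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2GRAY)
    # Threshold pixels above 1 up to 255 (white)
    _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    # Gaussian blur with a (5, 5) kernel
    blurred = cv2.GaussianBlur(thresh, (5, 5), 0)
    # Closing and opening with a (3, 3) elliptical structuring element
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    cleaned = cv2.morphologyEx(blurred, cv2.MORPH_CLOSE, kernel)
    cleaned = cv2.morphologyEx(cleaned, cv2.MORPH_OPEN, kernel)
    # Detect contours and keep only the largest one (OpenCV 4.x return signature)
    contours, _ = cv2.findContours(cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    largest = max(contours, key=cv2.contourArea)
    # Build the 2D mask from the largest contour
    mask = np.zeros_like(gray)
    cv2.drawContours(mask, [largest], -1, 255, thickness=cv2.FILLED)
    return mask, largest
```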
```
plankton = Segmentation(im_path, target_shape=(299, 299, 3))
plankton.segment()
mask = plankton.get_mask()
overlay = plankton.get_overlay()
segmented = plankton.get_segmented()
```
Unfortunately matplotlib.imshow does a poor job plotting these images. Here is a picture of all the transformations.

```
padded = plankton.get_padded()
plt.imshow(padded)
plt.show()
```
Notice how the segmented portion has been padded to the center to match the target_shape of (299, 299, 3) for inception_v3
This will ensure consistent image size for any neural network. Original image size has been preserved in the calculated features.
### Demo of padded (One for each class)
```
df = pd.read_csv('plankton.csv')
df.sample(5)
def plot_images(images):
fig, axes = plt.subplots(nrows=6, ncols=6, sharex=False,
sharey=True, figsize=(14,14))
fig.suptitle('Padded Plankton', size=20)
i = 0
for x in range(6):
for y in range(6):
axes[x,y].imshow(images[i])
axes[x,y].get_xaxis().set_visible(False)
axes[x,y].get_yaxis().set_visible(False)
i += 1
data_path = os.path.join(os.getcwd(), 'data')
images = []
for c in list(class_map.keys())[:-1]:
im_name = df[df.label==c].iloc[0,0]
im_dir = os.path.join(data_path, list(class_map.values())[c])
im_path = os.path.join(im_dir, im_name)
plankton = Segmentation(im_path)
plankton.segment()
padded = plankton.get_padded()
images.append(padded)
plot_images(images)
plt.show()
```
### An example use case of calculated features.
Features are calculated on the segmented plankton (a small sketch of the size and shape pieces follows this list):
- x_length and y_length (2)
- area (1)
- hu moments (7)
- haralick textures (13)
- local binary patterns (36)
- threshold adjacency statistics (54)
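Only the size and shape pieces are easy to show compactly. The sketch below is illustrative, not the actual `get_features()` implementation: it computes the bounding-box lengths, area, and Hu moments from the largest contour. The texture descriptors (Haralick, local binary patterns, threshold adjacency statistics) would typically come from libraries such as mahotas or scikit-image.
```
import cv2
import numpy as np

def size_and_shape_features(contour):
    """Illustrative sketch: the size + Hu-moment part of the feature vector."""
    x, y, w, h = cv2.boundingRect(contour)               # x_length, y_length
    area = cv2.contourArea(contour)                      # area
    hu = cv2.HuMoments(cv2.moments(contour)).flatten()   # 7 Hu moments
    return np.concatenate(([w, h, area], hu))            # 2 + 1 + 7 = 10 values
```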
```
from collections import Counter
print(df.shape[0])
for i, name in enumerate(df.im_name):
if name in list(pairs.keys()):
df.drop(i, inplace=True)
print(df.shape[0])
counter = Counter(df.label)
print(counter)
plt.title('Number of images per class')
plt.hist(df.label, bins=np.arange(0,len(class_map.keys()), 1))
plt.show()
```
Removing all duplicate image names, 134 images drop out of the dataframe. The histogram shows severe class imbalance. In order to create some balanced sample data, I will only use 17 examples from each class.
```
frames = []
for c in range(len(counter)):
frames.append(df[df.label==c].sample(n=17))
df_sample = pd.concat(frames)
df_sample = df_sample.reset_index(drop=True)
df_sample.sample(10)
frames = []
for im_name, label in zip(df_sample.im_name, df_sample.label):
dir_name = class_map[label]
dir_path = os.path.join(data_path, dir_name)
im_path = os.path.join(dir_path, im_name)
plankton = Segmentation(im_path)
plankton.segment()
features = plankton.get_features()
frames.append(features)
columns = plankton.get_columns()
df_features = pd.DataFrame(data=frames, columns=columns)
df_features['label'] = df_sample.label
df_features.sample(10)
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
X = df_features.iloc[:,:-1].values
y = df_features.iloc[:,-1].values
print(X.shape, y.shape)
sc = StandardScaler()
X = sc.fit_transform(X)
lr = LogisticRegression()
knn = KNeighborsClassifier(n_neighbors=4)
rf = RandomForestClassifier(n_estimators=100)
mlp = MLPClassifier(max_iter=1000)
names = ['Logistic Regression', 'K Nearest Neighbors',
'Random Forests', 'Multilayer Perceptron']
classifiers = [lr, knn, rf, mlp]
for name, cls in zip(names, classifiers):
cls.fit(X,y)
print('{}: {}'.format(name, cls.score(X,y)))
lr = LogisticRegression()
sfs = SFS(lr,
k_features=X.shape[1],
forward=True,
floating=True,
verbose=0,
scoring='accuracy',
cv=5)
sfs = sfs.fit(X, y)
fig = plot_sfs(sfs.get_metric_dict(), kind='std_dev')
plt.ylim([0.0, 0.5])
plt.title('Logistic Regression (w. StdDev)')
plt.grid()
plt.show()
df_results = pd.DataFrame.from_dict(sfs.get_metric_dict()).T
print(df_results)
from sklearn.manifold import TSNE
import matplotlib as mpl
mpl.rcParams['font.size'] = 16
mpl.rcParams['figure.figsize'] = (14,8)
mpl.rcParams['axes.labelsize'] = 16
X_tsne = TSNE(n_components=2).fit_transform(X)
plt.title('TSNE Decomposition (all features)')
plt.scatter(X_tsne[:,0], X_tsne[:,1], c=y)
plt.show()
X_tsne = TSNE(n_components=2).fit_transform(X[:,:3])
plt.title('TSNE Decomposition (size)')
plt.scatter(X_tsne[:,0], X_tsne[:,1], c=y)
plt.show()
X_tsne = TSNE(n_components=2).fit_transform(X[:,3:16])
plt.title('TSNE Decomposition (haralick)')
plt.scatter(X_tsne[:,0], X_tsne[:,1], c=y)
plt.show()
```
### Conclusion
The data size used here is too small for appropriate modeling. I'm going to need to do some transformations on the images to build up a bigger dataset. I used logistic regression with sequential feature selection due to its simplicity. The sampling is random, so sometimes different features are selected before others. I made t-SNE decompositions to check for some degree of separation between the classes. Using only the size-based features, you can start to see clear groupings of certain data points. The Haralick-only t-SNE also shows some good clustering. Using the whole feature set of 113 features seems excessive: data points don't cluster as well as in the reduced-feature plots. In my opinion it is probably best to discard the local binary patterns and threshold adjacency statistics when merging into a neural network. I would prefer the features to be learned through convolutions and then merged with the size-estimation features to complete the model.
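A minimal sketch of that last idea, purely illustrative (layer sizes are arbitrary, and the 3-value size input is an assumption), could merge a small convolutional branch with the handcrafted size features using the Keras functional API:
```
import tensorflow as tf
from tensorflow.keras import layers

# Image branch: features learned through convolutions on the padded images
image_in = layers.Input(shape=(299, 299, 3), name="image")
x = layers.Conv2D(32, 3, activation="relu")(image_in)
x = layers.MaxPooling2D()(x)
x = layers.Conv2D(64, 3, activation="relu")(x)
x = layers.GlobalAveragePooling2D()(x)

# Size branch: handcrafted size features (x_length, y_length, area)
size_in = layers.Input(shape=(3,), name="size_features")
s = layers.Dense(16, activation="relu")(size_in)

# Merge both branches and classify into the plankton classes
merged = layers.concatenate([x, s])
outputs = layers.Dense(len(class_map), activation="softmax")(merged)  # class_map loaded earlier

model = tf.keras.Model(inputs=[image_in, size_in], outputs=outputs)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
```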
|
github_jupyter
|
import pickle
with open('class_map.pickle', 'rb') as handle:
class_map = pickle.load(handle)
print(class_map.items())
with open('repeats.pickle', 'rb') as handle:
pairs = pickle.load(handle)
print('{} repeats found'.format(len(pairs.keys())))
for k, v in pairs.items():
print('Repeated image found: {} in classes: {}'.format(k, v))
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
from segmentation import Segmentation
data_path = os.path.join(os.getcwd(), 'data')
class_path = os.path.join(data_path, 'Doliolida')
im_path = os.path.join(class_path, 'SPC-PWSSC-1468186366-000556-041-352-1182-520-1216.png')
im_bgr = cv2.imread(im_path, -1)
im_rgb = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(im_rgb)
plt.show()
plankton = Segmentation(im_path, target_shape=(299, 299, 3))
plankton.segment()
mask = plankton.get_mask()
overlay = plankton.get_overlay()
segmented = plankton.get_segmented()
padded = plankton.get_padded()
plt.imshow(padded)
plt.show()
df = pd.read_csv('plankton.csv')
df.sample(5)
def plot_images(images):
fig, axes = plt.subplots(nrows=6, ncols=6, sharex=False,
sharey=True, figsize=(14,14))
fig.suptitle('Padded Plankton', size=20)
i = 0
for x in range(6):
for y in range(6):
axes[x,y].imshow(images[i])
axes[x,y].get_xaxis().set_visible(False)
axes[x,y].get_yaxis().set_visible(False)
i += 1
data_path = os.path.join(os.getcwd(), 'data')
images = []
for c in list(class_map.keys())[:-1]:
im_name = df[df.label==c].iloc[0,0]
im_dir = os.path.join(data_path, list(class_map.values())[c])
im_path = os.path.join(im_dir, im_name)
plankton = Segmentation(im_path)
plankton.segment()
padded = plankton.get_padded()
images.append(padded)
plot_images(images)
plt.show()
from collections import Counter
print(df.shape[0])
for i, name in enumerate(df.im_name):
if name in list(pairs.keys()):
df.drop(i, inplace=True)
print(df.shape[0])
counter = Counter(df.label)
print(counter)
plt.title('Number of images per class')
plt.hist(df.label, bins=np.arange(0,len(class_map.keys()), 1))
plt.show()
frames = []
for c in range(len(counter)):
frames.append(df[df.label==c].sample(n=17))
df_sample = pd.concat(frames)
df_sample = df_sample.reset_index(drop=True)
df_sample.sample(10)
frames = []
for im_name, label in zip(df_sample.im_name, df_sample.label):
dir_name = class_map[label]
dir_path = os.path.join(data_path, dir_name)
im_path = os.path.join(dir_path, im_name)
plankton = Segmentation(im_path)
plankton.segment()
features = plankton.get_features()
frames.append(features)
columns = plankton.get_columns()
df_features = pd.DataFrame(data=frames, columns=columns)
df_features['label'] = df_sample.label
df_features.sample(10)
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
X = df_features.iloc[:,:-1].values
y = df_features.iloc[:,-1].values
print(X.shape, y.shape)
sc = StandardScaler()
X = sc.fit_transform(X)
lr = LogisticRegression()
knn = KNeighborsClassifier(n_neighbors=4)
rf = RandomForestClassifier(n_estimators=100)
mlp = MLPClassifier(max_iter=1000)
names = ['Logistic Regression', 'K Nearest Neighbors',
'Random Forests', 'Multilayer Perceptron']
classifiers = [lr, knn, rf, mlp]
for name, cls in zip(names, classifiers):
cls.fit(X,y)
print('{}: {}'.format(name, cls.score(X,y)))
lr = LogisticRegression()
sfs = SFS(lr,
k_features=X.shape[1],
forward=True,
floating=True,
verbose=0,
scoring='accuracy',
cv=5)
sfs = sfs.fit(X, y)
fig = plot_sfs(sfs.get_metric_dict(), kind='std_dev')
plt.ylim([0.0, 0.5])
plt.title('Logistic Regression (w. StdDev)')
plt.grid()
plt.show()
df_results = pd.DataFrame.from_dict(sfs.get_metric_dict()).T
print(df_results)
from sklearn.manifold import TSNE
import matplotlib as mpl
mpl.rcParams['font.size'] = 16
mpl.rcParams['figure.figsize'] = (14,8)
mpl.rcParams['axes.labelsize'] = 16
X_tsne = TSNE(n_components=2).fit_transform(X)
plt.title('TSNE Decomposition (all features)')
plt.scatter(X_tsne[:,0], X_tsne[:,1], c=y)
plt.show()
X_tsne = TSNE(n_components=2).fit_transform(X[:,:3])
plt.title('TSNE Decomposition (size)')
plt.scatter(X_tsne[:,0], X_tsne[:,1], c=y)
plt.show()
X_tsne = TSNE(n_components=2).fit_transform(X[:,3:16])
plt.title('TSNE Decomposition (haralick)')
plt.scatter(X_tsne[:,0], X_tsne[:,1], c=y)
plt.show()
| 0.464659 | 0.928376 |
```
# TODO read from parameters
BASE_PATH = '../facebook-denisgrafov/'
import json
from datetime import datetime
from pytz import timezone
import pytz
def is_group_comment(comment_json):
if 'group' in comment_json['data'][0]['comment']:
return 1
return 0
def add_to_history(history, year, month):
if year not in history:
history[year] = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}
history[year][month] += 1
def add_empty_months(history):
for year in history.values():
for month in range(1, 13):
if month not in year:
year[month] = 0
def process_json_item_dates(item, history, unique_days, times_of_day_week, times_of_day_weekend):
# get the date of the item
date_utc = datetime.utcfromtimestamp(item['timestamp']).replace(tzinfo=pytz.utc)
date = date_utc.astimezone(timezone('Europe/Berlin'))
# increment days counter
unique_days[date.weekday()] += 1
# add the comment to the history per year and month (2 dimensional)
add_to_history(history, date.year, date.month)
# increment times of day counter
if date.weekday() < 5:
# week day
times_of_day_week[date.hour] += 1
else:
#weekend
times_of_day_weekend[date.hour] += 1
with open(BASE_PATH + 'comments/comments.json') as json_data:
comments_json = json.load(json_data)
group = 0
comments_per_unique_days = [0] * 7
comments_history = {}
comments_times_of_day_week = [0] * 24
comments_times_of_day_weekend = [0] * 24
for comment in comments_json['comments']:
group += is_group_comment(comment)
process_json_item_dates(comment, comments_history, comments_per_unique_days, comments_times_of_day_week, comments_times_of_day_weekend)
# add empty months
add_empty_months(comments_history)
import matplotlib.pyplot as plt
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
FIGURE_WIDTH = 10.0
FIGURE_HEIGHT = 8.0
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc('figure', figsize=(FIGURE_WIDTH, FIGURE_HEIGHT)) # default size of figures (inches)
labels = ('Comments to groups', 'Personal comments')
sizes = [group, len(comments_json['comments']) - group]
colors = ['lightcoral', 'lightskyblue']
explode = (0.1, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.text(-2, 1, 'Total comments: ' + str(len(comments_json['comments']))
+ '\nComments to groups: ' + str(group)
+ '\nPersonal comments: ' + str(len(comments_json['comments']) - group))
plt.axis('equal')
plt.show()
def bar_horizontal_grid(ax, bars, heights, title = None):
ax.bar(bars, heights)
ax.yaxis.grid()
ax.get_xaxis().set_ticks(bars)
if title:
ax.set_title(title)
return ax
def plot_items_per_year(ax, history, title=None):
per_year = {}
for year, months_of_year in history.items():
per_year[year] = sum(months_of_year.values())
bars, heights = zip(*sorted(per_year.items()))
return bar_horizontal_grid(ax, bars, heights, title)
def show_single_plot(func, data, title, size=None):
f, ax = plt.subplots()
func(ax, data)
f.suptitle(title)
if size:
f.set_size_inches(size)
plt.show()
show_single_plot(plot_items_per_year, comments_history, 'Comments per year')
import calendar
def plot_items_per_month(ax, history, title=None):
per_unique_months = [0] * 12
for months_of_year in history.values():
for month, value in months_of_year.items():
per_unique_months[month - 1] += value
bars = calendar.month_name[1:]
bar_horizontal_grid(ax, bars, per_unique_months, title).tick_params(axis='x', rotation=90)
show_single_plot(plot_items_per_month, comments_history, 'Comments per month')
def plot_items_per_weekday(ax, weekdays, title=None):
bars = calendar.day_name
bar_horizontal_grid(ax, bars, weekdays, title)
show_single_plot(plot_items_per_weekday, comments_per_unique_days, 'Comments per weekday')
def flatten_history(history):
flat_history = {}
for year, months_in_year in history.items():
for month, value in months_in_year.items():
month_id = str(year) + '.{:0=2d}'.format(month)
flat_history[month_id] = value
return flat_history
def plot_items_history(ax, flat_history):
bars, heights = zip(*sorted(flat_history.items()))
line, = ax.plot(bars, heights)
ax.yaxis.grid()
ax.get_xaxis().set_ticks(bars)
ax.tick_params(axis='x', rotation=90)
return line
show_single_plot(plot_items_history, flatten_history(comments_history), 'Comments history (months)', (FIGURE_WIDTH * 2, FIGURE_HEIGHT))
x = range(len(comments_times_of_day_weekend))
f, (week_plot, weekend_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Comments per time of a day')
bar_horizontal_grid(week_plot, x, comments_times_of_day_week, 'Week days')
bar_horizontal_grid(weekend_plot, x, comments_times_of_day_weekend, 'Weekend days')\
.annotate('"Thank you!" comments\nafter my birthday', xy=(11, 25), xytext=(14, 32), arrowprops=dict(facecolor='black'))
plt.show()
for comment in comments_json['comments']:
# get the date of the comment
date_utc = datetime.utcfromtimestamp(comment['timestamp']).replace(tzinfo=pytz.utc)
date = date_utc.astimezone(timezone('Europe/Berlin'))
if date.weekday() > 4 and date.hour == 11:
print(date.strftime('%b %d %Y %H:%M:%S'))
import re
with open(BASE_PATH + 'likes_and_reactions/posts_and_comments.json') as json_data:
likes_json = json.load(json_data)
likes_per_unique_days = [0] * 7
likes_history = {}
likes_times_of_day_week = [0] * 24
likes_times_of_day_weekend = [0] * 24
likes_targets = {}
for like in likes_json['reactions']:
title = like['title']
    # Extract the word that follows "'s " in the title, e.g. "comment", "post" or "photo"
    title = title[title.find('\'s') + 3:]
    target = re.split(r'\W+', title, 1)[0]
if target in likes_targets:
likes_targets[target] += 1
else:
likes_targets[target] = 1
process_json_item_dates(like, likes_history, likes_per_unique_days, likes_times_of_day_week, likes_times_of_day_weekend)
# add empty months
add_empty_months(likes_history)
labels = ('Posts', 'Others', 'Comments', 'Photos')
others = 0
for key, value in likes_targets.items():
if key != 'post' and key != 'photo' and key != 'comment':
others += value
sizes = [likes_targets['post'], others, likes_targets['comment'], likes_targets['photo']]
explode = (0.1, 0, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=0)
plt.text(-2, 1, 'Total likes: ' + str(len(likes_json['reactions']))
+ '\nLikes to posts: ' + str(likes_targets['post'])
+ '\nLikes to photos: ' + str(likes_targets['photo'])
+ '\nLikes to comments: ' + str(likes_targets['comment'])
+ '\nLikes to links: ' + str(likes_targets['link'])
+ '\nLikes to videos: ' + str(likes_targets['video'])
+ '\nLikes to activities: ' + str(likes_targets['activity'])
+ '\nLikes to album: ' + str(likes_targets['album'])
+ '\nLikes to biographies: ' + str(likes_targets['bio'])
+ '\nLikes to notes: ' + str(likes_targets['note'])
+ '\nLikes to life: ' + str(likes_targets['life']))
plt.axis('equal')
plt.show()
f, (likes_plot, comments_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Likes and comments per year')
plot_items_per_year(likes_plot, likes_history, 'Likes per year')
plot_items_per_year(comments_plot, comments_history, 'Comments per year')
plt.show()
f, (likes_plot, comments_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Likes and comments per month')
plot_items_per_month(likes_plot, likes_history, 'Likes per month')
plot_items_per_month(comments_plot, comments_history, 'Comments per month')
plt.show()
f, (likes_plot, comments_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Likes and comments per weekday')
plot_items_per_weekday(likes_plot, likes_per_unique_days, 'Likes per weekday')
plot_items_per_weekday(comments_plot, comments_per_unique_days, 'Comments per weekday')
plt.show()
flat_comments_history = flatten_history(comments_history)
flat_likes_history = flatten_history(likes_history)
#likes history is longer than comments history:
diff = set(flat_likes_history.keys()) - set(flat_comments_history.keys())
for month in diff:
flat_comments_history[month] = 0
f, ax = plt.subplots()
likes_line = plot_items_history(ax, flat_likes_history)
comments_line = plot_items_history(ax, flat_comments_history)
ax.legend((likes_line, comments_line), ('Likes', 'Comments'))
f.suptitle('Likes and comments history (months)')
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
plt.show()
x = range(len(likes_times_of_day_week))
f, axes = plt.subplots(2, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT * 2)
f.suptitle('Likes and comments per time of a day')
bar_horizontal_grid(axes[0][0], x, likes_times_of_day_week, 'Likes times on weekdays')
bar_horizontal_grid(axes[0][1], x, likes_times_of_day_weekend, 'Likes times on weekends')
bar_horizontal_grid(axes[1][0], x, comments_times_of_day_week, 'Comments times on weekdays')
bar_horizontal_grid(axes[1][1], x, comments_times_of_day_weekend, 'Comments times on weekends')
plt.show()
import os
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk(BASE_PATH + 'messages'):
if 'message.json' in files:
print(root + '/message.json')
```
|
github_jupyter
|
# TODO read from parameters
BASE_PATH = '../facebook-denisgrafov/'
import json
from datetime import datetime
from pytz import timezone
import pytz
def is_group_comment(comment_json):
if 'group' in comment_json['data'][0]['comment']:
return 1
return 0
def add_to_history(history, year, month):
if year not in history:
history[year] = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}
history[year][month] += 1
def add_empty_months(history):
for year in history.values():
for month in range(1, 13):
if month not in year:
year[month] = 0
def process_json_item_dates(item, history, unique_days, times_of_day_week, times_of_day_weekend):
# get the date of the item
date_utc = datetime.utcfromtimestamp(item['timestamp']).replace(tzinfo=pytz.utc)
date = date_utc.astimezone(timezone('Europe/Berlin'))
# increment days counter
unique_days[date.weekday()] += 1
# add the comment to the history per year and month (2 dimensional)
add_to_history(history, date.year, date.month)
# increment times of day counter
if date.weekday() < 5:
# week day
times_of_day_week[date.hour] += 1
else:
#weekend
times_of_day_weekend[date.hour] += 1
with open(BASE_PATH + 'comments/comments.json') as json_data:
comments_json = json.load(json_data)
group = 0
comments_per_unique_days = [0] * 7
comments_history = {}
comments_times_of_day_week = [0] * 24
comments_times_of_day_weekend = [0] * 24
for comment in comments_json['comments']:
group += is_group_comment(comment)
process_json_item_dates(comment, comments_history, comments_per_unique_days, comments_times_of_day_week, comments_times_of_day_weekend)
# add empty months
add_empty_months(comments_history)
import matplotlib.pyplot as plt
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
FIGURE_WIDTH = 10.0
FIGURE_HEIGHT = 8.0
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc('figure', figsize=(FIGURE_WIDTH, FIGURE_HEIGHT)) # default size of figures (inches)
labels = ('Comments to groups', 'Personal comments')
sizes = [group, len(comments_json['comments']) - group]
colors = ['lightcoral', 'lightskyblue']
explode = (0.1, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
plt.text(-2, 1, 'Total comments: ' + str(len(comments_json['comments']))
+ '\nComments to groups: ' + str(group)
+ '\nPersonal comments: ' + str(len(comments_json['comments']) - group))
plt.axis('equal')
plt.show()
def bar_horizontal_grid(ax, bars, heights, title = None):
ax.bar(bars, heights)
ax.yaxis.grid()
ax.get_xaxis().set_ticks(bars)
if title:
ax.set_title(title)
return ax
def plot_items_per_year(ax, history, title=None):
per_year = {}
for year, months_of_year in history.items():
per_year[year] = sum(months_of_year.values())
bars, heights = zip(*sorted(per_year.items()))
return bar_horizontal_grid(ax, bars, heights, title)
def show_single_plot(func, data, title, size=None):
f, ax = plt.subplots()
func(ax, data)
f.suptitle(title)
if size:
f.set_size_inches(size)
plt.show()
show_single_plot(plot_items_per_year, comments_history, 'Comments per year')
import calendar
def plot_items_per_month(ax, history, title=None):
per_unique_months = [0] * 12
for months_of_year in history.values():
for month, value in months_of_year.items():
per_unique_months[month - 1] += value
bars = calendar.month_name[1:]
bar_horizontal_grid(ax, bars, per_unique_months, title).tick_params(axis='x', rotation=90)
show_single_plot(plot_items_per_month, comments_history, 'Comments per month')
def plot_items_per_weekday(ax, weekdays, title=None):
bars = calendar.day_name
bar_horizontal_grid(ax, bars, weekdays, title)
show_single_plot(plot_items_per_weekday, comments_per_unique_days, 'Comments per weekday')
def flatten_history(history):
flat_history = {}
for year, months_in_year in history.items():
for month, value in months_in_year.items():
month_id = str(year) + '.{:0=2d}'.format(month)
flat_history[month_id] = value
return flat_history
def plot_items_history(ax, flat_history):
bars, heights = zip(*sorted(flat_history.items()))
line, = ax.plot(bars, heights)
ax.yaxis.grid()
ax.get_xaxis().set_ticks(bars)
ax.tick_params(axis='x', rotation=90)
return line
show_single_plot(plot_items_history, flatten_history(comments_history), 'Comments history (months)', (FIGURE_WIDTH * 2, FIGURE_HEIGHT))
x = range(len(comments_times_of_day_weekend))
f, (week_plot, weekend_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Comments per time of a day')
bar_horizontal_grid(week_plot, x, comments_times_of_day_week, 'Week days')
bar_horizontal_grid(weekend_plot, x, comments_times_of_day_weekend, 'Weekend days')\
.annotate('"Thank you!" comments\nafter my birthday', xy=(11, 25), xytext=(14, 32), arrowprops=dict(facecolor='black'))
plt.show()
for comment in comments_json['comments']:
# get the date of the comment
date_utc = datetime.utcfromtimestamp(comment['timestamp']).replace(tzinfo=pytz.utc)
date = date_utc.astimezone(timezone('Europe/Berlin'))
if date.weekday() > 4 and date.hour == 11:
print(date.strftime('%b %d %Y %H:%M:%S'))
import re
with open(BASE_PATH + 'likes_and_reactions/posts_and_comments.json') as json_data:
likes_json = json.load(json_data)
likes_per_unique_days = [0] * 7
likes_history = {}
likes_times_of_day_week = [0] * 24
likes_times_of_day_weekend = [0] * 24
likes_targets = {}
for like in likes_json['reactions']:
title = like['title']
    # Extract the word that follows "'s " in the title, e.g. "comment", "post" or "photo"
    title = title[title.find('\'s') + 3:]
    target = re.split(r'\W+', title, 1)[0]
if target in likes_targets:
likes_targets[target] += 1
else:
likes_targets[target] = 1
process_json_item_dates(like, likes_history, likes_per_unique_days, likes_times_of_day_week, likes_times_of_day_weekend)
# add empty months
add_empty_months(likes_history)
labels = ('Posts', 'Others', 'Comments', 'Photos')
others = 0
for key, value in likes_targets.items():
if key != 'post' and key != 'photo' and key != 'comment':
others += value
sizes = [likes_targets['post'], others, likes_targets['comment'], likes_targets['photo']]
explode = (0.1, 0, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=0)
plt.text(-2, 1, 'Total likes: ' + str(len(likes_json['reactions']))
+ '\nLikes to posts: ' + str(likes_targets['post'])
+ '\nLikes to photos: ' + str(likes_targets['photo'])
+ '\nLikes to comments: ' + str(likes_targets['comment'])
+ '\nLikes to links: ' + str(likes_targets['link'])
+ '\nLikes to videos: ' + str(likes_targets['video'])
+ '\nLikes to activities: ' + str(likes_targets['activity'])
+ '\nLikes to album: ' + str(likes_targets['album'])
+ '\nLikes to biographies: ' + str(likes_targets['bio'])
+ '\nLikes to notes: ' + str(likes_targets['note'])
+ '\nLikes to life: ' + str(likes_targets['life']))
plt.axis('equal')
plt.show()
f, (likes_plot, comments_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Likes and comments per year')
plot_items_per_year(likes_plot, likes_history, 'Likes per year')
plot_items_per_year(comments_plot, comments_history, 'Comments per year')
plt.show()
f, (likes_plot, comments_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Likes and comments per month')
plot_items_per_month(likes_plot, likes_history, 'Likes per month')
plot_items_per_month(comments_plot, comments_history, 'Comments per month')
plt.show()
f, (likes_plot, comments_plot) = plt.subplots(1, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
f.suptitle('Likes and comments per weekday')
plot_items_per_weekday(likes_plot, likes_per_unique_days, 'Likes per weekday')
plot_items_per_weekday(comments_plot, comments_per_unique_days, 'Comments per weekday')
plt.show()
flat_comments_history = flatten_history(comments_history)
flat_likes_history = flatten_history(likes_history)
#likes history is longer than comments history:
diff = set(flat_likes_history.keys()) - set(flat_comments_history.keys())
for month in diff:
flat_comments_history[month] = 0
f, ax = plt.subplots()
likes_line = plot_items_history(ax, flat_likes_history)
comments_line = plot_items_history(ax, flat_comments_history)
ax.legend((likes_line, comments_line), ('Likes', 'Comments'))
f.suptitle('Likes and comments history (months)')
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT)
plt.show()
x = range(len(likes_times_of_day_week))
f, axes = plt.subplots(2, 2, sharey=True)
f.set_size_inches(FIGURE_WIDTH * 2, FIGURE_HEIGHT * 2)
f.suptitle('Likes and comments per time of a day')
bar_horizontal_grid(axes[0][0], x, likes_times_of_day_week, 'Likes times on weekdays')
bar_horizontal_grid(axes[0][1], x, likes_times_of_day_weekend, 'Likes times on weekends')
bar_horizontal_grid(axes[1][0], x, comments_times_of_day_week, 'Comments times on weekdays')
bar_horizontal_grid(axes[1][1], x, comments_times_of_day_weekend, 'Comments times on weekends')
plt.show()
import os
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk(BASE_PATH + 'messages'):
if 'message.json' in files:
print(root + '/message.json')
| 0.315525 | 0.412234 |
# Classical simulation of quantum dots
In this file we show some of the capabilities of the `ClassicalDotSystem`, which can simulate simple quantum dot systems.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from qtt.simulation.classicaldotsystem import ClassicalDotSystem, TripleDot
```
We define a linear triple-dot system using the `TripleDot` class, which inherits from the `ClassicalDotSystem`.
```
test_dot = TripleDot()
```
The function `calculate_ground_state` calculates the number of electrons in the ground state for given values of the voltages on the gates forming the dots.
```
temp_state = test_dot.calculate_ground_state(np.array([0,0,0]));
print(temp_state)
temp_state = test_dot.calculate_ground_state(np.array([120,0,100]));
print(temp_state)
# make a test gate plane
nx = 150
ny = 150
test2Dparams = np.zeros((3,nx,ny))
sweepx = np.linspace(0, 300, nx)
sweepy = np.linspace(0, 300, ny)
xv, yv = np.meshgrid(sweepx, sweepy)
test2Dparams[0] = xv+.1*yv
xv, yv = np.meshgrid(sweepy, sweepx)
test2Dparams[1] = .1*xv+yv
# run the honeycomb simulation
test_dot.simulate_honeycomb(test2Dparams, multiprocess=False, verbose=1)
plt.pcolor(sweepx,sweepy,test_dot.honeycomb, shading='auto')
plt.colorbar()
plt.show()
test2Dparams[2] = 103
# run the honeycomb simulation again
test_dot.simulate_honeycomb(test2Dparams, multiprocess=False, verbose=1)
plt.pcolor(sweepx,sweepy,test_dot.honeycomb, shading='auto')
plt.colorbar()
plt.show()
```
## Defining your own system
```
class TestDot(ClassicalDotSystem):
def __init__(self, name='testdot', **kwargs):
super().__init__(name=name, ndots=3, ngates=3, maxelectrons=2, **kwargs)
self.makebasis(ndots=3)
vardict = {}
vardict["mu0_values"] = np.array([-27.0, -20.0, -25.0]) # chemical potential at zero gate voltage
vardict["Eadd_values"] = np.array([54.0, 52.8, 54.0]) # addition energy
vardict["W_values"] = np.array([12.0, 5.0, 10.0]) # inter-site Coulomb repulsion (!order is important: (1,2), (1,3), (2,3)) (lexicographic ordering)
vardict["alpha_values"] = np.array([[1.0, 0.25, 0.1],
[0.25, 1.0, 0.25],
[0.1, 0.25, 1.0]])
for name in self.varnames:
setattr(self, name, vardict[name+'_values'])
test_dot_2 = TestDot()
# make a test gate plane
nx = 10
ny = 10
test2Dparams = np.zeros((test_dot_2.ngates,nx,ny))
sweepx = np.linspace(-100, 460, nx)
sweepy = np.linspace(-100, 500, ny)
xv, yv = np.meshgrid(sweepx, sweepy)
test2Dparams[0] = xv+.1*yv
xv, yv = np.meshgrid(sweepy, sweepx)
test2Dparams[2] = .1*xv+yv
# run the honeycomb simulation
test_dot_2.simulate_honeycomb(test2Dparams, multiprocess=False)
plt.clf()
plt.pcolor(sweepx,sweepy,test_dot_2.hcgs.sum(axis=2), shading='auto')
plt.colorbar()
plt.show()
plt.clf()
plt.pcolor(sweepx,sweepy,test_dot_2.honeycomb, shading='auto')
plt.colorbar()
plt.show()
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from qtt.simulation.classicaldotsystem import ClassicalDotSystem, TripleDot
test_dot = TripleDot()
temp_state = test_dot.calculate_ground_state(np.array([0,0,0]));
print(temp_state)
temp_state = test_dot.calculate_ground_state(np.array([120,0,100]));
print(temp_state)
# make a test gate plane
nx = 150
ny = 150
test2Dparams = np.zeros((3,nx,ny))
sweepx = np.linspace(0, 300, nx)
sweepy = np.linspace(0, 300, ny)
xv, yv = np.meshgrid(sweepx, sweepy)
test2Dparams[0] = xv+.1*yv
xv, yv = np.meshgrid(sweepy, sweepx)
test2Dparams[1] = .1*xv+yv
# run the honeycomb simulation
test_dot.simulate_honeycomb(test2Dparams, multiprocess=False, verbose=1)
plt.pcolor(sweepx,sweepy,test_dot.honeycomb, shading='auto')
plt.colorbar()
plt.show()
test2Dparams[2] = 103
# run the honeycomb simulation again
test_dot.simulate_honeycomb(test2Dparams, multiprocess=False, verbose=1)
plt.pcolor(sweepx,sweepy,test_dot.honeycomb, shading='auto')
plt.colorbar()
plt.show()
class TestDot(ClassicalDotSystem):
def __init__(self, name='testdot', **kwargs):
super().__init__(name=name, ndots=3, ngates=3, maxelectrons=2, **kwargs)
self.makebasis(ndots=3)
vardict = {}
vardict["mu0_values"] = np.array([-27.0, -20.0, -25.0]) # chemical potential at zero gate voltage
vardict["Eadd_values"] = np.array([54.0, 52.8, 54.0]) # addition energy
vardict["W_values"] = np.array([12.0, 5.0, 10.0]) # inter-site Coulomb repulsion (!order is important: (1,2), (1,3), (2,3)) (lexicographic ordering)
vardict["alpha_values"] = np.array([[1.0, 0.25, 0.1],
[0.25, 1.0, 0.25],
[0.1, 0.25, 1.0]])
for name in self.varnames:
setattr(self, name, vardict[name+'_values'])
test_dot_2 = TestDot()
# make a test gate plane
nx = 10
ny = 10
test2Dparams = np.zeros((test_dot_2.ngates,nx,ny))
sweepx = np.linspace(-100, 460, nx)
sweepy = np.linspace(-100, 500, ny)
xv, yv = np.meshgrid(sweepx, sweepy)
test2Dparams[0] = xv+.1*yv
xv, yv = np.meshgrid(sweepy, sweepx)
test2Dparams[2] = .1*xv+yv
# run the honeycomb simulation
test_dot_2.simulate_honeycomb(test2Dparams, multiprocess=False)
plt.clf()
plt.pcolor(sweepx,sweepy,test_dot_2.hcgs.sum(axis=2), shading='auto')
plt.colorbar()
plt.show()
plt.clf()
plt.pcolor(sweepx,sweepy,test_dot_2.honeycomb, shading='auto')
plt.colorbar()
plt.show()
| 0.483405 | 0.981257 |
```
# Water Treatment Plant Clustering Analysis
# Dataset Reference: https://archive.ics.uci.edu/ml/datasets/Water+Treatment+Plant
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("/Users/gauravsharma/Documents/python/water-treatment.csv", header = None)
data.head()
data.shape
# Assigning the Column Names
data.columns = ("Date", "Q_E", "ZN_E", "PH_E", "DBO_E", "DQO_E", "SS_E",
"SSV_E", "SED_E", "COND_E", "PH_P", "DBO_P", "SS_P", "SSV_P", "SED_P", "COND_P",
"PH_D", "DBO_D", "DQO_D", "SS_D", "SSV_D", "SED_D", "COND_D", "PH_S", "DBO_S",
"DQO_S", "SS_S", "SSV_S", "SED_S", "COND_S", "RD_DBO_P", "RD_SS_P", "RD_SED_P",
"RD_DBO_S", "RD_DQO_S", "RD_DBO_G", "RD_DQO_G", "RD_SS_G", "RD_SED_G")
data.head()
# Cleaning the dataset
# Replacing the "?" with null values
data.replace("?", np.nan, inplace = True)
data.head()
# To process further, converting the relevant columns to float
float_cols = ['Q_E', 'ZN_E', 'DBO_E', 'DQO_E', 'SS_E', 'SSV_E', 'SED_E', 'COND_E',
              'DBO_P', 'SS_P', 'SSV_P', 'SED_P', 'COND_P',
              'DBO_D', 'DQO_D', 'SS_D', 'SSV_D', 'SED_D', 'COND_D',
              'PH_S', 'DBO_S', 'DQO_S', 'SS_S', 'SSV_S', 'SED_S', 'COND_S',
              'RD_DBO_P', 'RD_SS_P', 'RD_SED_P', 'RD_DBO_S', 'RD_DQO_S',
              'RD_DBO_G', 'RD_DQO_G', 'RD_SS_G', 'RD_SED_G']
for col in float_cols:
    data[col] = data[col].astype(float)
data.dtypes
# Dropping out "Date" Column
Data = data.drop(['Date'], axis = 1)
Data.head()
Data.info
# checking for missing values
print(Data.isna().sum())
plt.figure(figsize = (20,12))
sns.heatmap(Data.isnull())
plt.show()
# Replacing the null values with mean-value of respective column
df = Data.fillna(Data.mean())
df.isna()
print(df.isna().sum())
df.info()
df.shape
df.describe()
df_corr = df.corr()
df_corr
sns.heatmap(df_corr)
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
scaler.fit(df)
Data_scaled_array = scaler.transform(df)
Data_scaled = pd.DataFrame(Data_scaled_array, columns = df.columns)
Data_scaled.sample(5)
# K-Means Clustering
from sklearn.cluster import KMeans
nclusters = 4 # this is the k in kmeans
seed = 0
km = KMeans(n_clusters=nclusters, random_state=seed)
km.fit(Data_scaled)
# predict the cluster for each data point
y_cluster_kmeans = km.predict(Data_scaled)
y_cluster_kmeans
from sklearn import metrics
score = metrics.silhouette_score(Data_scaled, y_cluster_kmeans)
score
scores = metrics.silhouette_samples(Data_scaled, y_cluster_kmeans)
sns.distplot(scores);
from sklearn.cluster import KMeans
wcss = []
for k in range(1, 15):
kmeansForLoop = KMeans(n_clusters = k)
kmeansForLoop.fit(df)
wcss.append(kmeansForLoop.inertia_)
plt.figure(figsize = (15, 15))
plt.plot(range(1, 15), wcss)
plt.xlabel("K value")
plt.ylabel("WCSS")
plt.show()
from sklearn.metrics import silhouette_score
silhouette_scores = []
for n_cluster in range(2,30):
silhouette_scores.append(
silhouette_score(df, KMeans(n_clusters = n_cluster).fit_predict(df)))
# Plotting a bar graph to compare the results
k = list(range(2, 30))
plt.bar(k, silhouette_scores)
plt.xlabel('Number of clusters', fontsize = 10)
plt.ylabel('Silhouette Score', fontsize = 10)
plt.grid()
plt.show()
nclusters = 2 # this is the k in kmeans
seed = 0
km = KMeans(n_clusters=nclusters, random_state=seed)
km.fit(Data_scaled)
# predict the cluster for each data point
y_cluster_kmeans = km.predict(Data_scaled)
y_cluster_kmeans
score = metrics.silhouette_score(Data_scaled, y_cluster_kmeans)
score
km.inertia_
km.cluster_centers_
Data_scaled.head()
kmeans = KMeans(n_clusters = 2)
clusters = kmeans.fit_predict(Data_scaled)
Data_scaled["type"] = clusters
Data_scaled["type"].unique()
Data_scaled.head()
Data_scaled.describe()
plt.figure(figsize = (15, 10))
plt.scatter(Data_scaled["SED_E"][Data_scaled["type"] == 0], Data_scaled["PH_E"][Data_scaled["type"] == 0], color = "red")
plt.scatter(Data_scaled["SED_E"][Data_scaled["type"] == 1], Data_scaled["PH_E"][Data_scaled["type"] == 1], color = "green")
plt.xlabel('SED_E')
plt.ylabel('PH_E')
plt.show()
kmeans.cluster_centers_
# K-means++
from sklearn.cluster import KMeans
wcss=[]
# Compute the WCSS for k = 1..29 so we can look for an elbow
for i in range(1,30):
kmeans = KMeans(n_clusters= i, init='k-means++', random_state=0)
kmeans.fit(df)
wcss.append(kmeans.inertia_)
#Visualizing the ELBOW method to get the optimal value of K
plt.plot(range(1,30), wcss)
plt.title('The Elbow Method')
plt.xlabel('no of clusters')
plt.ylabel('wcss')
plt.show()
#Model Build (K-means)
kmeansmodel = KMeans(n_clusters= 5, random_state=0)
y_kmeans= kmeansmodel.fit_predict(df)
score = metrics.silhouette_score(df, y_kmeans)
score
# A silhouette score around 0.5 or higher provides good evidence that the clusters reflect real structure in the data
# Agglomerative Clustering
df.head()
plt.figure(figsize = (15, 15))
plt.scatter(df["SED_E"], df["PH_E"])
plt.xlabel('SED_E')
plt.ylabel('PH_E')
plt.show()
from scipy.cluster.hierarchy import linkage,dendrogram
merg = linkage(df, method = "ward")
dendrogram(merg, leaf_rotation = 90)
plt.xlabel("data points")
plt.ylabel("euclidean distance")
plt.show()
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 3, affinity = "euclidean", linkage = "ward")
cluster = hc.fit_predict(df)
Data["label"] = cluster
Data.label.value_counts()
dataWithoutTypes= df[['SED_E','PH_E']]
dataWithoutTypes.head()
from scipy.cluster.hierarchy import linkage,dendrogram
merg = linkage(dataWithoutTypes, method = "ward")
dendrogram(merg, leaf_rotation = 90)
plt.xlabel("data points")
plt.ylabel("euclidean distance")
plt.show()
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 2, affinity = "euclidean", linkage = "ward")
cluster = hc.fit_predict(dataWithoutTypes)
dataWithoutTypes["label"] = cluster
dataWithoutTypes.label.value_counts()
# Data after hierarchical clustering
plt.figure(figsize = (15, 10))
plt.scatter(dataWithoutTypes["SED_E"][dataWithoutTypes.label == 0], dataWithoutTypes["PH_E"][dataWithoutTypes.label == 0], color = "red")
plt.scatter(dataWithoutTypes["SED_E"][dataWithoutTypes.label == 1], dataWithoutTypes["PH_E"][dataWithoutTypes.label == 1], color = "blue")
plt.xlabel("SED_E")
plt.ylabel("PH_E")
plt.show()
```
|
github_jupyter
|
# Water Treatment Plant Clustering Analysis
# Dataset Reference: https://archive.ics.uci.edu/ml/datasets/Water+Treatment+Plant
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("/Users/gauravsharma/Documents/python/water-treatment.csv", header = None)
data.head()
data.shape
# Assigning the Column Names
data.columns = ("Date", "Q_E", "ZN_E", "PH_E", "DBO_E", "DQO_E", "SS_E",
"SSV_E", "SED_E", "COND_E", "PH_P", "DBO_P", "SS_P", "SSV_P", "SED_P", "COND_P",
"PH_D", "DBO_D", "DQO_D", "SS_D", "SSV_D", "SED_D", "COND_D", "PH_S", "DBO_S",
"DQO_S", "SS_S", "SSV_S", "SED_S", "COND_S", "RD_DBO_P", "RD_SS_P", "RD_SED_P",
"RD_DBO_S", "RD_DQO_S", "RD_DBO_G", "RD_DQO_G", "RD_SS_G", "RD_SED_G")
data.head()
# Cleaning the dataset
# Replacing the "?" with null values
data.replace("?", np.nan, inplace = True)
data.head()
# To process further, converting the relevant columns to float
float_cols = ['Q_E', 'ZN_E', 'DBO_E', 'DQO_E', 'SS_E', 'SSV_E', 'SED_E', 'COND_E',
              'DBO_P', 'SS_P', 'SSV_P', 'SED_P', 'COND_P',
              'DBO_D', 'DQO_D', 'SS_D', 'SSV_D', 'SED_D', 'COND_D',
              'PH_S', 'DBO_S', 'DQO_S', 'SS_S', 'SSV_S', 'SED_S', 'COND_S',
              'RD_DBO_P', 'RD_SS_P', 'RD_SED_P', 'RD_DBO_S', 'RD_DQO_S',
              'RD_DBO_G', 'RD_DQO_G', 'RD_SS_G', 'RD_SED_G']
for col in float_cols:
    data[col] = data[col].astype(float)
data.dtypes
# Dropping out "Date" Column
Data = data.drop(['Date'], axis = 1)
Data.head()
Data.info
# checking for missing values
print(Data.isna().sum())
plt.figure(figsize = (20,12))
sns.heatmap(Data.isnull())
plt.show()
# Replacing the null values with mean-value of respective column
df = Data.fillna(Data.mean())
df.isna()
print(df.isna().sum())
df.info()
df.shape
df.describe()
df_corr = df.corr()
df_corr
sns.heatmap(df_corr)
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
scaler.fit(df)
Data_scaled_array = scaler.transform(df)
Data_scaled = pd.DataFrame(Data_scaled_array, columns = df.columns)
Data_scaled.sample(5)
# K-Means Clustering
from sklearn.cluster import KMeans
nclusters = 4 # this is the k in kmeans
seed = 0
km = KMeans(n_clusters=nclusters, random_state=seed)
km.fit(Data_scaled)
# predict the cluster for each data point
y_cluster_kmeans = km.predict(Data_scaled)
y_cluster_kmeans
from sklearn import metrics
score = metrics.silhouette_score(Data_scaled, y_cluster_kmeans)
score
scores = metrics.silhouette_samples(Data_scaled, y_cluster_kmeans)
sns.distplot(scores);
from sklearn.cluster import KMeans
wcss = []
for k in range(1, 15):
kmeansForLoop = KMeans(n_clusters = k)
kmeansForLoop.fit(df)
wcss.append(kmeansForLoop.inertia_)
plt.figure(figsize = (15, 15))
plt.plot(range(1, 15), wcss)
plt.xlabel("K value")
plt.ylabel("WCSS")
plt.show()
from sklearn.metrics import silhouette_score
silhouette_scores = []
for n_cluster in range(2,30):
silhouette_scores.append(
silhouette_score(df, KMeans(n_clusters = n_cluster).fit_predict(df)))
# Plotting a bar graph to compare the results
k = list(range(2, 30))
plt.bar(k, silhouette_scores)
plt.xlabel('Number of clusters', fontsize = 10)
plt.ylabel('Silhouette Score', fontsize = 10)
plt.grid()
plt.show()
nclusters = 2 # this is the k in kmeans
seed = 0
km = KMeans(n_clusters=nclusters, random_state=seed)
km.fit(Data_scaled)
# predict the cluster for each data point
y_cluster_kmeans = km.predict(Data_scaled)
y_cluster_kmeans
score = metrics.silhouette_score(Data_scaled, y_cluster_kmeans)
score
km.inertia_
km.cluster_centers_
Data_scaled.head()
kmeans = KMeans(n_clusters = 2)
clusters = kmeans.fit_predict(Data_scaled)
Data_scaled["type"] = clusters
Data_scaled["type"].unique()
Data_scaled.head()
Data_scaled.describe()
plt.figure(figsize = (15, 10))
plt.scatter(Data_scaled["SED_E"][Data_scaled["type"] == 0], Data_scaled["PH_E"][Data_scaled["type"] == 0], color = "red")
plt.scatter(Data_scaled["SED_E"][Data_scaled["type"] == 1], Data_scaled["PH_E"][Data_scaled["type"] == 1], color = "green")
plt.xlabel('SED_E')
plt.ylabel('PH_E')
plt.show()
kmeans.cluster_centers_
# K-means++
from sklearn.cluster import KMeans
wcss=[]
# Compute the WCSS for k = 1..29 so we can look for an elbow
for i in range(1,30):
kmeans = KMeans(n_clusters= i, init='k-means++', random_state=0)
kmeans.fit(df)
wcss.append(kmeans.inertia_)
#Visualizing the ELBOW method to get the optimal value of K
plt.plot(range(1,30), wcss)
plt.title('The Elbow Method')
plt.xlabel('no of clusters')
plt.ylabel('wcss')
plt.show()
#Model Build (K-means)
kmeansmodel = KMeans(n_clusters= 5, random_state=0)
y_kmeans= kmeansmodel.fit_predict(df)
score = metrics.silhouette_score(df, y_kmeans)
score
# A silhouette score around 0.5 or higher provides good evidence that the clusters reflect real structure in the data
# Agglomerative Clustering
df.head()
plt.figure(figsize = (15, 15))
plt.scatter(df["SED_E"], df["PH_E"])
plt.xlabel('SED_E')
plt.ylabel('PH_E')
plt.show()
from scipy.cluster.hierarchy import linkage,dendrogram
merg = linkage(df, method = "ward")
dendrogram(merg, leaf_rotation = 90)
plt.xlabel("data points")
plt.ylabel("euclidean distance")
plt.show()
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 3, affinity = "euclidean", linkage = "ward")
cluster = hc.fit_predict(df)
Data["label"] = cluster
Data.label.value_counts()
dataWithoutTypes= df[['SED_E','PH_E']]
dataWithoutTypes.head()
from scipy.cluster.hierarchy import linkage,dendrogram
merg = linkage(dataWithoutTypes, method = "ward")
dendrogram(merg, leaf_rotation = 90)
plt.xlabel("data points")
plt.ylabel("euclidean distance")
plt.show()
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 2, affinity = "euclidean", linkage = "ward")
cluster = hc.fit_predict(dataWithoutTypes)
dataWithoutTypes["label"] = cluster
dataWithoutTypes.label.value_counts()
# Data after hierarchical clustering
plt.figure(figsize = (15, 10))
plt.scatter(dataWithoutTypes["SED_E"][dataWithoutTypes.label == 0], dataWithoutTypes["PH_E"][dataWithoutTypes.label == 0], color = "red")
plt.scatter(dataWithoutTypes["SED_E"][dataWithoutTypes.label == 1], dataWithoutTypes["PH_E"][dataWithoutTypes.label == 1], color = "blue")
plt.xlabel("SED_E")
plt.ylabel("PH_E")
plt.show()
| 0.594434 | 0.716398 |
<a href="https://colab.research.google.com/github/lmcanavals/acomplex/blob/main/03_03_matrix_multiplication.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
def matmul(a, b):
aRows, aCols = a.shape
bRows, bCols = b.shape
if aCols != bRows:
return None
c = np.zeros((aRows, bCols))
for i in range(aRows):
for j in range(bCols):
temp = 0
for k in range(aCols):
temp += a[i, k] * b[k, j]
c[i, j] = temp
return c
a = np.random.randint(10, size=(3, 4))
b = np.random.randint(10, size=(4, 2))
c = matmul(a, b)
assert np.allclose(c, np.matmul(a, b))
print(a)
print(b)
print(c)
def mm1(a, b, c, rowi, rowf, coli, colf):
n = len(a)
if rowi == rowf:
temp = 0
for k in range(n):
temp += a[rowi, k] * b[k, coli]
c[rowi, coli] = temp
else:
rowmid = (rowi + rowf) // 2
colmid = (coli + colf) // 2
mm1(a, b, c, rowi, rowmid, coli, colmid)
mm1(a, b, c, rowi, rowmid, colmid + 1, colf)
mm1(a, b, c, rowmid + 1, rowf, coli, colmid)
mm1(a, b, c, rowmid + 1, rowf, colmid + 1, colf)
def dvmatmul1(a, b):
n = len(a)
c = np.zeros((n, n))
mm1(a, b, c, 0, n-1, 0, n-1)
return c
a = np.random.randint(10, size=(8, 8))
b = np.random.randint(10, size=(8, 8))
c = dvmatmul1(a, b)
assert np.allclose(c, np.matmul(a, b))
print(a)
print(b)
print(c)
def mm2(a, b, c, rowi, rowf, coli, colf):
aRows, aCols = a.shape
bRows, bCols = b.shape
if rowi == rowf and coli == colf:
temp = 0
for k in range(aCols):
temp += a[rowi, k] * b[k, coli]
c[rowi, coli] = temp
elif rowi == rowf:
colmid = (coli + colf) // 2
mm2(a, b, c, rowi, rowf, coli, colmid)
mm2(a, b, c, rowi, rowf, colmid + 1, colf)
elif coli == colf:
rowmid = (rowi + rowf) // 2
mm2(a, b, c, rowi, rowmid, coli, colf)
mm2(a, b, c, rowmid + 1, rowf, coli, colf)
else:
rowmid = (rowi + rowf) // 2
colmid = (coli + colf) // 2
mm2(a, b, c, rowi, rowmid, coli, colmid)
mm2(a, b, c, rowi, rowmid, colmid + 1, colf)
mm2(a, b, c, rowmid + 1, rowf, coli, colmid)
mm2(a, b, c, rowmid + 1, rowf, colmid + 1, colf)
def dvmatmul2(a, b):
aRows, aCols = a.shape
bRows, bCols = b.shape
if aCols != bRows:
return None
c = np.zeros((aRows, bCols))
mm2(a, b, c, 0, aRows-1, 0, bCols-1)
return c
a = np.random.randint(10, size=(4, 8))
b = np.random.randint(10, size=(8, 3))
c = dvmatmul2(a, b)
assert np.allclose(c, np.matmul(a, b))
print(a)
print(b)
print(c)
```
|
github_jupyter
|
import numpy as np
def matmul(a, b):
aRows, aCols = a.shape
bRows, bCols = b.shape
if aCols != bRows:
return None
c = np.zeros((aRows, bCols))
for i in range(aRows):
for j in range(bCols):
temp = 0
for k in range(aCols):
temp += a[i, k] * b[k, j]
c[i, j] = temp
return c
a = np.random.randint(10, size=(3, 4))
b = np.random.randint(10, size=(4, 2))
c = matmul(a, b)
assert np.allclose(c, np.matmul(a, b))
print(a)
print(b)
print(c)
def mm1(a, b, c, rowi, rowf, coli, colf):
n = len(a)
if rowi == rowf:
temp = 0
for k in range(n):
temp += a[rowi, k] * b[k, coli]
c[rowi, coli] = temp
else:
rowmid = (rowi + rowf) // 2
colmid = (coli + colf) // 2
mm1(a, b, c, rowi, rowmid, coli, colmid)
mm1(a, b, c, rowi, rowmid, colmid + 1, colf)
mm1(a, b, c, rowmid + 1, rowf, coli, colmid)
mm1(a, b, c, rowmid + 1, rowf, colmid + 1, colf)
def dvmatmul1(a, b):
n = len(a)
c = np.zeros((n, n))
mm1(a, b, c, 0, n-1, 0, n-1)
return c
a = np.random.randint(10, size=(8, 8))
b = np.random.randint(10, size=(8, 8))
c = dvmatmul1(a, b)
assert np.allclose(c, np.matmul(a, b))
print(a)
print(b)
print(c)
def mm2(a, b, c, rowi, rowf, coli, colf):
aRows, aCols = a.shape
bRows, bCols = b.shape
if rowi == rowf and coli == colf:
temp = 0
for k in range(aCols):
temp += a[rowi, k] * b[k, coli]
c[rowi, coli] = temp
elif rowi == rowf:
colmid = (coli + colf) // 2
mm2(a, b, c, rowi, rowf, coli, colmid)
mm2(a, b, c, rowi, rowf, colmid + 1, colf)
elif coli == colf:
rowmid = (rowi + rowf) // 2
mm2(a, b, c, rowi, rowmid, coli, colf)
mm2(a, b, c, rowmid + 1, rowf, coli, colf)
else:
rowmid = (rowi + rowf) // 2
colmid = (coli + colf) // 2
mm2(a, b, c, rowi, rowmid, coli, colmid)
mm2(a, b, c, rowi, rowmid, colmid + 1, colf)
mm2(a, b, c, rowmid + 1, rowf, coli, colmid)
mm2(a, b, c, rowmid + 1, rowf, colmid + 1, colf)
def dvmatmul2(a, b):
aRows, aCols = a.shape
bRows, bCols = b.shape
if aCols != bRows:
return None
c = np.zeros((aRows, bCols))
mm2(a, b, c, 0, aRows-1, 0, bCols-1)
return c
a = np.random.randint(10, size=(4, 8))
b = np.random.randint(10, size=(8, 3))
c = dvmatmul2(a, b)
assert np.allclose(c, np.matmul(a, b))
print(a)
print(b)
print(c)
| 0.281702 | 0.932944 |
# Rainy or Sunny Hidden Markov Model
authors:<br>
Jacob Schreiber [<a href="mailto:jmschreiber91@gmail.com">jmschreiber91@gmail.com</a>]<br>
Nicholas Farn [<a href="mailto:nicholasfarn@gmail.com">nicholasfarn@gmail.com</a>]
This is an example of a sunny-rainy hidden Markov model built with pomegranate. The example is drawn from the Wikipedia <a href=https://en.wikipedia.org/wiki/Hidden_Markov_model#A_concrete_example>article</a> on Hidden Markov Models describing what Bob likes to do on rainy or sunny days.
```
from pomegranate import *
import random
import math
random.seed(0)
```
We first create a `HiddenMarkovModel` object, and name it "Rainy-Sunny".
```
model = HiddenMarkovModel( name="Rainy-Sunny" )
```
We then create the two possible states of the model, "rainy" and "sunny". We make them both discrete distributions, with the possibilities of Bob either walking, shopping, or cleaning.
```
rainy = State( DiscreteDistribution({ 'walk': 0.1, 'shop': 0.4, 'clean': 0.5 }), name='Rainy' )
sunny = State( DiscreteDistribution({ 'walk': 0.6, 'shop': 0.3, 'clean': 0.1 }), name='Sunny' )
```
We then add the transitions probabilities, starting with the probability the model starts as sunny or rainy.
```
model.add_transition( model.start, rainy, 0.6 )
model.add_transition( model.start, sunny, 0.4 )
```
We then add the transition matrix. We subtract 0.05 from each transition probability to leave room for the probability of exiting the HMM.
```
model.add_transition( rainy, rainy, 0.65 )
model.add_transition( rainy, sunny, 0.25 )
model.add_transition( sunny, rainy, 0.35 )
model.add_transition( sunny, sunny, 0.55 )
```
Last, we add transitions to mark the end of the model.
```
model.add_transition( rainy, model.end, 0.1 )
model.add_transition( sunny, model.end, 0.1 )
```
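As a quick sanity check (an illustrative addition, not part of the original example), each state's outgoing probabilities, including the transition to the end state, sum to one (up to floating-point rounding):
```
# Rainy: stay + to Sunny + to end; Sunny: to Rainy + stay + to end
print(0.65 + 0.25 + 0.1, 0.35 + 0.55 + 0.1)
```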
Finally we "bake" the model, finalizing its structure.
```
model.bake( verbose=True )
```
Now let's check on Bob each hour and see what he is doing! In other words, let's create a sequence of observations.
```
sequence = [ 'walk', 'shop', 'clean', 'clean', 'clean', 'walk', 'clean' ]
```
Now let's check the probability of observing this sequence.
```
print(math.e**model.forward( sequence )[ len(sequence), model.end_index ])
```
Then the probability that it was rainy at step 3 of this sequence, given the observations.
```
print(math.e**model.forward_backward( sequence )[1][ 2, model.states.index( rainy ) ])
```
The probability of observing the rest of the sequence, given that it is sunny at step 4.
```
print(math.e**model.backward( sequence )[ 3, model.states.index( sunny ) ])
```
Finally, the most likely state at each step, given the above sequence.
```
print(" ".join( state.name for i, state in model.maximum_a_posteriori( sequence )[1] ))
```
|
github_jupyter
|
from pomegranate import *
import random
import math
random.seed(0)
model = HiddenMarkovModel( name="Rainy-Sunny" )
rainy = State( DiscreteDistribution({ 'walk': 0.1, 'shop': 0.4, 'clean': 0.5 }), name='Rainy' )
sunny = State( DiscreteDistribution({ 'walk': 0.6, 'shop': 0.3, 'clean': 0.1 }), name='Sunny' )
model.add_transition( model.start, rainy, 0.6 )
model.add_transition( model.start, sunny, 0.4 )
model.add_transition( rainy, rainy, 0.65 )
model.add_transition( rainy, sunny, 0.25 )
model.add_transition( sunny, rainy, 0.35 )
model.add_transition( sunny, sunny, 0.55 )
model.add_transition( rainy, model.end, 0.1 )
model.add_transition( sunny, model.end, 0.1 )
model.bake( verbose=True )
sequence = [ 'walk', 'shop', 'clean', 'clean', 'clean', 'walk', 'clean' ]
print(math.e**model.forward( sequence )[ len(sequence), model.end_index ])
print(math.e**model.forward_backward( sequence )[1][ 2, model.states.index( rainy ) ])
print(math.e**model.backward( sequence )[ 3, model.states.index( sunny ) ])
print(" ".join( state.name for i, state in model.maximum_a_posteriori( sequence )[1] ))
| 0.367497 | 0.916969 |
```
import time
import math
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import concat
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV
import pickle
extra_data=pd.read_csv('new_corrected_data-v1-7-24-2019.csv')# loading extra data generated by the LSTM #prediction_of_1_0007.csv
original_data=pd.read_csv('train_Data.csv')#original data
extra_data.head()#displaying LSTM generated data
extra=np.array(extra_data)# getting the original shape
print(extra.shape)
extra[0] # showing first row vector
# Setting predictions of all generated data to 1 because their outputs are mostly 0.98, 0.99 etc.
for i in range(0,len(extra),1):
extra[i][8]=int(1)
extra #showing LSTM generated extra data ready for training
original_data.head() #showing original data
#loading data values from original dataset(Pima)
val=original_data.values
X=val[:,:8].astype(float)# getting the feature values
Y=val[:,8].astype(int)# getting prediction
#extra_X denotes the extra training data generated by LSTM
extra_X=extra[:,:8].astype(float)
#extra_Y denotes extra training prediction data for training the algorithm
extra_Y=extra[:,8].astype(int)
# extrain=original training data + LSTM generated training data
# eytrain=original training prediction + LSTM generated
extrain=np.concatenate((X,extra_X),axis=0)
eytrain=np.concatenate((Y,extra_Y),axis=0)
print(extrain.shape)
print(eytrain.shape)
test_Data=pd.read_csv('test_Data.csv')
test_Data=np.array(test_Data)
test_Data.shape
X_test=test_Data[:,:8].astype(float)
Y_test=test_Data[:,8].astype(int)
def svc_param_selection(X, y, nfolds):
Cs = [0.001, 0.01, 0.1, 1, 10]
gammas = [0.001, 0.01, 0.1, 1]
param_grid = {'C': Cs, 'gamma' : gammas}
grid_search = GridSearchCV(SVC(kernel='rbf',probability=True), param_grid, cv=nfolds)
grid_search.fit(X, y)
grid_search.best_params_
return grid_search.best_estimator_
# setting a fixed random seed to produce the same results over and over again; it was kept the same across experiments
seed=11
# first rbf_svc for training on just the original training data
#rbf_svc = SVC(kernel='rbf', gamma=0.00001, C=1000,probability=True).fit(X_train,Y_train)
rbf_svc=svc_param_selection(X,Y,5)
# secondly rbf_svc_extra for training on original+LSTM
rbf_svc_extra = svc_param_selection(extrain,eytrain,5)
# fitting the data
rbf_svc.fit(X,Y)
rbf_svc_extra.fit(extrain,eytrain)
# loading the validation dataset previously set up by validation split(this data was not included in any training procedure)
Prediction_data=X_test
#producing the shape
Prediction_data.shape
# making predictions on validation dataset using svc trained only on original training data
prediction_on_real_dataset=rbf_svc.predict_proba(Prediction_data)
#saving probabilities
predictions=rbf_svc.predict(Prediction_data)
# making predictions on validation dataset using svc trained on original training data + LSTM generated data
prediction_on_real_dataset_adding_extra=rbf_svc_extra.predict_proba(Prediction_data)
#saving probabilities
predictions_extra=rbf_svc_extra.predict(Prediction_data)
print(len(prediction_on_real_dataset))
print(len(prediction_on_real_dataset_adding_extra))
from sklearn.metrics import f1_score
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_curve, roc_auc_score
f1_score(Y_test,predictions)#without_extra
f1_score(Y_test,predictions_extra)#with_extra
prediction_on_real_dataset[0]# probabilities of 2 class
# getting AUC score for classifier trained on only original data
fpr, tpr, thres=roc_curve(Y_test,prediction_on_real_dataset[:,1],pos_label=1)
#getting AUC score for classifier trained on original + LSTM generated data
efpr, etpr, ethres=roc_curve(Y_test,prediction_on_real_dataset_adding_extra[:,1],pos_label=1)
auc_original = auc(fpr, tpr)        # classifier trained on the original data only
auc_with_extra = auc(efpr, etpr)    # classifier trained on original + LSTM-generated data
print('Without extra: ', auc_original)
print('With extra: ', auc_with_extra)
# plotting data ( for minority predictions , here minority class = 1)
Y_probs=prediction_on_real_dataset[:,1]
fpr,tpr,_=roc_curve(Y_test,Y_probs)
Y_probs_extra=prediction_on_real_dataset_adding_extra[:,1]
efpr,etpr,_=roc_curve(Y_test,Y_probs_extra)
pyplot.clf()
pyplot.plot(fpr, tpr, label='original (AUC : %5.4f)' % (auc_original), linewidth=1.2)
pyplot.plot(efpr, etpr, label='LSTM generated+original (AUC : %5.4f)' % (auc_with_extra), linewidth=1.2)
pyplot.xlabel('FPR')
pyplot.ylabel('TPR')
pyplot.title('ROC curve - Pima dataset')
pyplot.legend(loc=4)
pyplot.savefig('ROC curve .png',dpi=500)
pyplot.show()
Y_probs=prediction_on_real_dataset[:,0]
fpr,tpr,_=roc_curve(Y_test,Y_probs)
Y_probs_extra=prediction_on_real_dataset_adding_extra[:,0]
efpr,etpr,_=roc_curve(Y_test,Y_probs_extra)
pyplot.clf()
pyplot.plot(fpr, tpr,label='original')
pyplot.plot(efpr,etpr,label='LSTM generated')
pyplot.xlabel('FPR')
pyplot.ylabel('TPR(recall)')
pyplot.title('ROC curve - Pima dataset')
pyplot.legend(loc=2)
pyplot.show()
prediction_on_real_dataset
prediction_on_real_dataset_adding_extra
```
|
github_jupyter
|
import time
import math
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import concat
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV
import pickle
extra_data=pd.read_csv('new_corrected_data-v1-7-24-2019.csv')# loading extra data generated by the LSTM #prediction_of_1_0007.csv
original_data=pd.read_csv('train_Data.csv')#original data
extra_data.head()#displaying LSTM generated data
extra=np.array(extra_data)# getting the original shape
print(extra.shape)
extra[0] # showing first row vector
# Setting predictions of all generated data to 1 because their outputs are mostly 0.98, 0.99 etc.
for i in range(0,len(extra),1):
extra[i][8]=int(1)
extra #showing LSTM generated extra data ready for training
original_data.head() #showing original data
#loading data values from original dataset(Pima)
val=original_data.values
X=val[:,:8].astype(float)# getting the feature values
Y=val[:,8].astype(int)# getting prediction
#extra_X denotes the extra training data generated by LSTM
extra_X=extra[:,:8].astype(float)
#extra_Y denotes extra training prediction data for training the algorithm
extra_Y=extra[:,8].astype(int)
# extrain=original training data + LSTM generated training data
# eytrain=original training prediction + LSTM generated
extrain=np.concatenate((X,extra_X),axis=0)
eytrain=np.concatenate((Y,extra_Y),axis=0)
print(extrain.shape)
print(eytrain.shape)
test_Data=pd.read_csv('test_Data.csv')
test_Data=np.array(test_Data)
test_Data.shape
X_test=test_Data[:,:8].astype(float)
Y_test=test_Data[:,8].astype(int)
def svc_param_selection(X, y, nfolds):
Cs = [0.001, 0.01, 0.1, 1, 10]
gammas = [0.001, 0.01, 0.1, 1]
param_grid = {'C': Cs, 'gamma' : gammas}
grid_search = GridSearchCV(SVC(kernel='rbf',probability=True), param_grid, cv=nfolds)
grid_search.fit(X, y)
grid_search.best_params_
return grid_search.best_estimator_
# setting a fixed random seed to produce the same results over and over again; it was kept the same across experiments
seed=11
# first rbf_svc for training on just the original training data
#rbf_svc = SVC(kernel='rbf', gamma=0.00001, C=1000,probability=True).fit(X_train,Y_train)
rbf_svc=svc_param_selection(X,Y,5)
# secondly rbf_svc_extra for training on original+LSTM
rbf_svc_extra = svc_param_selection(extrain,eytrain,5)
# fitting the data
rbf_svc.fit(X,Y)
rbf_svc_extra.fit(extrain,eytrain)
# loading the validation dataset previously set up by validation split(this data was not included in any training procedure)
Prediction_data=X_test
#producing the shape
Prediction_data.shape
# making predictions on validation dataset using svc trained only on original training data
prediction_on_real_dataset=rbf_svc.predict_proba(Prediction_data)
#saving probabilities
predictions=rbf_svc.predict(Prediction_data)
# making predictions on validation dataset using svc trained on original training data + LSTM generated data
prediction_on_real_dataset_adding_extra=rbf_svc_extra.predict_proba(Prediction_data)
#saving probabilities
predictions_extra=rbf_svc_extra.predict(Prediction_data)
print(len(prediction_on_real_dataset))
print(len(prediction_on_real_dataset_adding_extra))
from sklearn.metrics import f1_score
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_curve, roc_auc_score
f1_score(Y_test,predictions)#without_extra
f1_score(Y_test,predictions_extra)#with_extra
prediction_on_real_dataset[0]# probabilities of 2 class
# getting AUC score for classifier trained on only original data
fpr, tpr, thres=roc_curve(Y_test,prediction_on_real_dataset[:,1],pos_label=1)
#getting AUC score for classifier trained on original + LSTM generated data
efpr, etpr, ethres=roc_curve(Y_test,prediction_on_real_dataset_adding_extra[:,1],pos_label=1)
auc_original = auc(fpr, tpr)        # classifier trained on the original data only
auc_with_extra = auc(efpr, etpr)    # classifier trained on original + LSTM-generated data
print('Without extra: ', auc_original)
print('With extra: ', auc_with_extra)
# plotting data ( for minority predictions , here minority class = 1)
Y_probs=prediction_on_real_dataset[:,1]
fpr,tpr,_=roc_curve(Y_test,Y_probs)
Y_probs_extra=prediction_on_real_dataset_adding_extra[:,1]
efpr,etpr,_=roc_curve(Y_test,Y_probs_extra)
pyplot.clf()
pyplot.plot(fpr, tpr, label='original (AUC : %5.4f)' % (auc_original), linewidth=1.2)
pyplot.plot(efpr, etpr, label='LSTM generated+original (AUC : %5.4f)' % (auc_with_extra), linewidth=1.2)
pyplot.xlabel('FPR')
pyplot.ylabel('TPR')
pyplot.title('ROC curve - Pima dataset')
pyplot.legend(loc=4)
pyplot.savefig('ROC curve .png',dpi=500)
pyplot.show()
Y_probs=prediction_on_real_dataset[:,0]
fpr,tpr,_=roc_curve(Y_test,Y_probs)
Y_probs_extra=prediction_on_real_dataset_adding_extra[:,0]
efpr,etpr,_=roc_curve(Y_test,Y_probs_extra)
pyplot.clf()
pyplot.plot(fpr, tpr,label='original')
pyplot.plot(efpr,etpr,label='LSTM generated')
pyplot.xlabel('FPR')
pyplot.ylabel('TPR(recall)')
pyplot.title('ROC curve - Pima dataset')
pyplot.legend(loc=2)
pyplot.show()
prediction_on_real_dataset
prediction_on_real_dataset_adding_extra
| 0.60871 | 0.393997 |
```
import torch
```
### Why you need a good init
To understand why initialization is important in a neural net, we'll focus on the basic operation you have there: matrix multiplications. So let's just take a vector `x`, and a matrix `a` initialized randomly, then multiply them 100 times (as if we had 100 layers).
[Jump_to lesson 9 video](https://course.fast.ai/videos/?lesson=9&t=1132)
```
x = torch.randn(512)
a = torch.randn(512,512)
for i in range(100): x = a @ x
x.mean(),x.std() # nan: the activations exploded (overflowed) after repeated multiplications
```
The problem you'll get with that is activation explosion: very soon, your activations will go to nan. We can even ask the loop to break when that first happens:
```
x = torch.randn(512)
a = torch.randn(512,512)
for i in range(100):
x = a @ x
if x.std() != x.std(): break
i
```
It only takes 27 multiplications! On the other hand, if you initialize your activations with a scale that is too low, then you'll get another problem:
```
x = torch.randn(512)
a = torch.randn(512,512) * 0.01
for i in range(100): x = a @ x
x.mean(),x.std() # the activations vanished to zero
```
Here, every activation vanished to 0. So to avoid that problem, people have come with several strategies to initialize their weight matices, such as:
- use a standard deviation that will make sure x and Ax have exactly the same scale
- use an orthogonal matrix to initialize the weight (orthogonal matrices have the special property that they preserve the L2 norm, so x and Ax would have the same sum of squares in that case)
- use [spectral normalization](https://arxiv.org/pdf/1802.05957.pdf) on the matrix A (the spectral norm of A is the least possible number M such that `torch.norm(A@x) <= M*torch.norm(x)`, so dividing A by this M ensures you don't overflow; you can still vanish with this). A short sketch of the last two strategies follows.
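Here is a minimal sketch of those two ideas, assuming a recent PyTorch with `torch.linalg` available (this block is an addition, not part of the original lesson):
```
import torch

x = torch.randn(512)

# Orthogonal init: an orthogonal matrix preserves the L2 norm of x.
a_orth = torch.empty(512, 512)
torch.nn.init.orthogonal_(a_orth)
torch.norm(x), torch.norm(a_orth @ x)      # the two norms match

# Spectral scaling: divide a by its largest singular value (its spectral norm),
# so that the norm of (a / M) @ x can never exceed the norm of x.
a = torch.randn(512, 512)
M = torch.linalg.svdvals(a)[0]             # singular values come back sorted, largest first
a_spec = a / M
torch.norm(a_spec @ x) <= torch.norm(x)
```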
### The magic number for scaling
Here we will focus on the first one, which is the Xavier initialization. It tells us that we should use a scale equal to `1/math.sqrt(n_in)` where `n_in` is the number of inputs of our matrix.
[Jump_to lesson 9 video](https://course.fast.ai/videos/?lesson=9&t=1273)
```
import math
x = torch.randn(512)
a = torch.randn(512,512) / math.sqrt(512)
for i in range(100): x = a @ x
x.mean(),x.std()
```
And indeed it works. Note that this magic number isn't very far from the 0.01 we had earlier.
```
1/ math.sqrt(512)
```
But where does it come from? It's not that mysterious if you remember the definition of the matrix multiplication. When we do `y = a @ x`, the coefficients of `y` are defined by
$$y_{i} = a_{i,0} x_{0} + a_{i,1} x_{1} + \cdots + a_{i,n-1} x_{n-1} = \sum_{k=0}^{n-1} a_{i,k} x_{k}$$
or in code:
```
y[i] = sum([c*d for c,d in zip(a[i], x)])
```
Now at the very beginning, our `x` vector has a mean of roughly 0. and a standard deviation of roughly 1. (since we picked it that way).
```
x = torch.randn(512)
x.mean(), x.std()
```
NB: This is why it's extremely important to normalize your inputs in deep learning: the initialization rules were designed for inputs with a mean of 0. and a standard deviation of 1.
If you need a refresher from your statistics course, the mean is the sum of all the elements divided by the number of elements (a basic average). The standard deviation measures how far the data spreads out around the mean. It's computed by the following formula:
$$\sigma = \sqrt{\frac{1}{n}\left[(x_{0}-m)^{2} + (x_{1}-m)^{2} + \cdots + (x_{n-1}-m)^{2}\right]}$$
where m is the mean and $\sigma$ (the greek letter sigma) is the standard deviation. Here we have a mean of 0, so it's just the square root of the mean of x squared.
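As a quick numeric check (an addition, not in the original notebook), we can apply that formula directly and compare with PyTorch's built-in standard deviation:
```
x = torch.randn(512)
m = x.mean()
sigma = ((x - m)**2).mean().sqrt()   # the formula above (population standard deviation)
sigma, x.std(unbiased=False)         # the two values agree; x.std() alone uses the unbiased estimate
```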
If we go back to `y = a @ x` and assume that we chose weights for `a` that also have a mean of 0, we can compute the standard deviation of `y` quite easily. Since it's random, and we may fall on bad numbers, we repeat the operation 100 times.
```
mean,sqr = 0.,0.
for i in range(100):
x = torch.randn(512)
a = torch.randn(512, 512)
y = a @ x
mean += y.mean().item()
sqr += y.pow(2).mean().item()
mean/100,sqr/100
```
Now that looks very close to the dimension of our matrix, 512. And that's no coincidence! When you compute y, you sum 512 products of one element of `a` with one element of `x`. So what are the mean and the standard deviation of such a product? We can show mathematically that as long as the elements in `a` and the elements in `x` are independent, the mean is 0 and the std is 1. This can also be seen experimentally:
```
mean,sqr = 0.,0.
for i in range(10000):
x = torch.randn(1)
a = torch.randn(1)
y = a*x
mean += y.item()
sqr += y.pow(2).item()
mean/10000,sqr/10000
```
Then we sum 512 of those products, each with a mean of zero and a mean of squares of 1, so we get something with a mean of 0 and a mean of squares of 512, hence `math.sqrt(512)` being our magic number. If we scale the weights of the matrix `a` by dividing them by this `math.sqrt(512)`, it will give us a `y` of scale 1, and repeating the product as many times as we want won't overflow or vanish.
### Adding ReLU in the mix
We can reproduce the previous experiment with a ReLU, to see that this time the mean shifts and the mean of squares becomes 0.5. This time the magic number will be `math.sqrt(2/512)` to properly scale the weights of the matrix.
```
mean,sqr = 0.,0.
for i in range(10000):
x = torch.randn(1)
a = torch.randn(1)
y = a*x
y = 0 if y < 0 else y.item()
mean += y
sqr += y ** 2
mean/10000,sqr/10000
```
We can double check by running the experiment on the whole matrix product.
```
mean,sqr = 0.,0.
for i in range(100):
x = torch.randn(512)
a = torch.randn(512, 512)
y = a @ x
y = y.clamp(min=0)
mean += y.mean().item()
sqr += y.pow(2).mean().item()
mean/100,sqr/100
```
Or we can check that scaling the coefficients by the magic number gives us a scale of 1.
```
mean,sqr = 0.,0.
for i in range(100):
x = torch.randn(512)
a = torch.randn(512, 512) * math.sqrt(2/512)
y = a @ x
y = y.clamp(min=0)
mean += y.mean().item()
sqr += y.pow(2).mean().item()
mean/100,sqr/100
```
The math behind it is a tiny bit more complex, and you can find everything in the [Kaiming](https://arxiv.org/abs/1502.01852) and [Xavier](http://proceedings.mlr.press/v9/glorot10a.html) papers, but this gives the intuition behind those results.
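For reference (an addition to the notebook, not part of the original lesson), PyTorch ships initializers that implement these schemes. A minimal sketch with `nn.Linear`, assuming a reasonably recent PyTorch:
```
import torch
from torch import nn

lin = nn.Linear(512, 512, bias=False)
nn.init.kaiming_normal_(lin.weight, nonlinearity='relu')   # std = sqrt(2/512), the magic number above

x = torch.randn(512)
y = lin(x).clamp(min=0)              # linear layer followed by a ReLU
y.pow(2).mean().item()               # stays close to 1, so the scale is preserved layer after layer
```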
# Plotting
*Elements of Data Science*
Copyright 2021 [Allen B. Downey](https://allendowney.com)
License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)
[Click here to run this notebook on Colab](https://colab.research.google.com/github/AllenDowney/ElementsOfDataScience/blob/master/06_plotting.ipynb) or
[click here to download it](https://github.com/AllenDowney/ElementsOfDataScience/raw/master/06_plotting.ipynb).
This chapter presents ways to create figures and graphs, more generally called **data visualizations**. As examples, we'll generate three figures:
* We'll replicate a figure from the Pew Research Center that shows changes in religious affiliation in the U.S. over time.
* We'll replicate the figure from *The Economist* that shows the prices of sandwiches in Boston and London (we saw this data back in Chapter 3).
* We'll make a plot to test Zipf's law, which describes the relationship between word frequencies and their ranks.
With the tools in this chapter, you can generate a variety of simple graphs. We will see more visualization tools in later chapters.
But before we get started with plotting, we need a new language feature: keyword arguments.
## Keyword arguments
When you call most functions, you have to provide values. For example, when you call `np.exp`, the value you provide is a number:
```
import numpy as np
np.exp(1)
```
When you call `np.power`, you have to provide two numbers:
```
np.power(10, 6)
```
The values you provide are called **arguments**. Specifically, the values in these examples are **positional arguments** because their position determines how they are used.
In the second example, `power` computes `10` to the sixth power, not `6` to the 10th power because of the order of the arguments.
Many functions also take **keyword arguments**, which are optional. For example, we have previously used `int` to convert a string to an integer.
```
int('21')
```
By default, `int` assumes that the number is in base 10. But you can provide a keyword argument that specifies a different base.
For example, the string `'21'`, interpreted in base 8, represents the number `2 * 8 + 1 = 17`. Here's how we do this conversion using `int`.
```
int('21', base=8)
```
The string `'21'` is a positional argument. The integer value `8` is a keyword argument, with the keyword `base`.
Specifying a keyword argument looks like an assignment statement, but it does not create a new variable.
And when you specify a keyword argument, you don't choose the variable name. In this example, the keyword name, `base`, is part of the definition of `int`. If you specify another keyword name, you get an error.
Run the following line in the next cell to see what happens.
```
int('123', bass=11)
```
**Exercise:** The `print` function takes a keyword argument called `end` that specifies the character it prints at the end of the line. By default, `end` is the newline character, `\n`. So if you call `print` more than once, the results normally appear on separate lines, like this:
```
for x in [1, 2, 3]:
print(x)
```
Modify the previous example so it prints the elements of the list, all on one line, with spaces between them.
Then modify it to print an open bracket at the beginning and a close bracket and newline at the end.
```
# Solution goes here
```
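One possible solution sketch, offered here as a reference rather than the book's official answer:
```
# Print the elements on one line, wrapped in brackets, ending with a newline.
print('[', end='')
for x in [1, 2, 3]:
    print(x, end=' ')
print(']')
```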
## Religious affiliation
Now we're ready to make some graphs.
In October 2019 the Pew Research Center published "In U.S., Decline of Christianity Continues at Rapid Pace" at <https://www.pewforum.org/2019/10/17/in-u-s-decline-of-christianity-continues-at-rapid-pace>.
It includes this figure, which shows changes in religious affiliation among adults in the U.S. over the previous 10 years.

As an exercise, we'll replicate this figure. It shows results from two sources, Religious Landscape Studies and Pew Research Political Surveys. The political surveys provide data from more years, so we'll focus on that.
The data from the figure are available from Pew Research at <https://www.pewforum.org/wp-content/uploads/sites/7/2019/10/Detailed-Tables-v1-FOR-WEB.pdf>, but they are in a PDF document. It is sometimes possible to extract data from PDF documents, but for now we'll enter the data by hand.
```
year = [2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]
christian = [77, 76, 75, 73, 73, 71, 69, 68, 67, 65]
unaffiliated = [17, 17, 19, 19, 20, 21, 24, 23, 25, 26]
```
The library we'll use for plotting is Matplotlib; more specifically, we'll use a part of it called Pyplot, which we'll import with the nickname `plt`.
```
import matplotlib.pyplot as plt
```
Pyplot provides a function called `plot` that makes a line plot. It takes two sequences as arguments, the `x` values and the `y` values. The sequences can be tuples, lists, or arrays.
```
plt.plot(year, christian);
```
The semi-colon at the end of the line cleans up the output; without it, the return value from `plot` would be displayed above the figure.
If you plot multiple lines in a single cell, they appear on the same axes.
```
plt.plot(year, christian)
plt.plot(year, unaffiliated);
```
Plotting them on the same axes makes it possible to compare them directly.
However, notice that Pyplot chooses the range for the axes automatically; in this example the `y` axis starts around 15, not zero.
As a result, it provides a misleading picture, making the ratio of the two lines look bigger than it really is.
We can set the limits of the `y` axis using the function `plt.ylim`. The argument is a list with two values, the lower bound and the upper bound.
```
plt.plot(year, christian)
plt.plot(year, unaffiliated)
plt.ylim([0, 80]);
```
That's better, but this graph is missing some of the most important elements: labels for the axes and a title.
## Decorating the axes
To label the axes and add a title, we'll use Pyplot functions `xlabel`, `ylabel`, and `title`. All of them take strings as arguments.
```
plt.plot(year, christian)
plt.plot(year, unaffiliated)
plt.ylim([0, 80])
plt.xlabel('Year')
plt.ylabel('% of adults')
plt.title('Religious affiliation of U.S. adults');
```
Now let's add another important element, a legend that indicates which line is which.
To do that, we add a label to each line, using the keyword argument `label`.
Then we call `plt.legend` to create the legend.
```
plt.plot(year, christian, label='Christian')
plt.plot(year, unaffiliated, label='Unaffiliated')
plt.ylim([0, 80])
plt.xlabel('Year')
plt.ylabel('% of adults')
plt.title('Religious affiliation of U.S. adults')
plt.legend();
```
**Exercise:** The original figure plots lines between the data points, but it also plots "markers" showing the location of each data point. It is generally good practice to include markers, especially if data are not available for every year.
Modify the previous example to include a keyword argument `marker` with the string value `'o'`, which indicates that you want to plot circles as markers.
```
# Solution goes here
```
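One way this exercise could be approached (a sketch, not the official solution):
```
plt.plot(year, christian, marker='o', label='Christian')
plt.plot(year, unaffiliated, marker='o', label='Unaffiliated')
plt.ylim([0, 80])
plt.xlabel('Year')
plt.ylabel('% of adults')
plt.title('Religious affiliation of U.S. adults')
plt.legend();
```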
**Exercise:** In the original figure, the line labeled `'Christian'` is red and the line labeled `'Unaffiliated'` is grey.
Find the online documentation of `plt.plot` and figure out how to use keyword arguments to specify colors. Choose colors to (roughly) match the original figure.
The `legend` function takes a keyword argument that specifies the location of the legend. Read the documentation of this function and move the legend to the center left of the figure.
```
# Solution goes here
```
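A possible sketch for this exercise; the exact color names are a judgment call, and `loc='center left'` is the keyword argument described in the documentation of `legend`:
```
plt.plot(year, christian, marker='o', color='C3', label='Christian')
plt.plot(year, unaffiliated, marker='o', color='gray', label='Unaffiliated')
plt.ylim([0, 80])
plt.xlabel('Year')
plt.ylabel('% of adults')
plt.title('Religious affiliation of U.S. adults')
plt.legend(loc='center left');
```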
## Sandwiches
In a previous chapter we used data from an article in *The Economist* comparing sandwich prices in Boston and London: "Why Americans pay more for lunch than Britons do" at <https://www.economist.com/finance-and-economics/2019/09/07/why-americans-pay-more-for-lunch-than-britons-do>.
The article includes this graph showing prices of several sandwiches in the two cities:

As an exercise, let's see if we can replicate this figure.
Here's the data from the article again: the names of the sandwiches and the price list for each city.
```
name_list = [
'Lobster roll',
'Chicken caesar',
'Bang bang chicken',
'Ham and cheese',
'Tuna and cucumber',
'Egg'
]
boston_price_list = [9.99, 7.99, 7.49, 7, 6.29, 4.99]
london_price_list = [7.5, 5, 4.4, 5, 3.75, 2.25]
```
In the previous section we plotted percentages on the `y` axis versus time on the `x` axis.
Now we want to plot the sandwich names on the `y` axis and the prices on the `x` axis.
Here's how:
```
plt.plot(boston_price_list, name_list)
plt.xlabel('Price in USD');
```
`name_list` is a list of strings; Pyplot places them on the `y` axis from bottom to top, equally spaced.
By default Pyplot connects the points with lines, but in this example the lines don't make sense because the sandwich names are **categorical**, not numerical. You can't interpolate between an egg sandwich and a tuna sandwich.
We can turn on markers and turn off lines with keyword arguments.
```
plt.plot(boston_price_list, name_list,
marker='o', linestyle='')
plt.xlabel('Price in USD');
```
Or we can do the same thing more concisely by providing a **format string** as a positional argument. You can read the documentation of `plt.plot` to learn more about format strings.
```
plt.plot(boston_price_list, name_list, 'o')
plt.plot(london_price_list, name_list, 's')
plt.xlabel('Price in USD')
plt.title('Pret a Manger prices in Boston and London');
```
I added a title at the same time.
Now, to approximate the colors in the original figure, we can use the strings `'C3'` and `'C0'`, which specify colors from the default color sequence. You can read the documentation to learn more about specifying colors in Pyplot: <https://matplotlib.org/3.1.1/tutorials/colors/colors.html>.
```
plt.plot(boston_price_list, name_list, 'o', color='C3')
plt.plot(london_price_list, name_list, 's', color='C0')
plt.xlabel('Price in USD')
plt.title('Pret a Manger prices in Boston and London');
```
To connect the dots with lines, we'll use `plt.hlines`, which draws horizontal lines. It takes three arguments: a sequence of values on the `y` axis, which are the sandwich names in this example, and two sequences of values on the `x` axis, which are the London prices and Boston prices.
```
plt.plot(boston_price_list, name_list, 'o', color='C3')
plt.plot(london_price_list, name_list, 's', color='C0')
plt.hlines(name_list, london_price_list, boston_price_list)
plt.xlabel('Price in USD')
plt.title('Pret a Manger prices in Boston and London');
```
**Exercise:** To finish off this example, add a legend that identifies the London and Boston prices. Remember that you have to add a `label` keyword each time you call `plt.plot`, and then call `plt.legend`.
Notice that the sandwiches in our figure are in the opposite order of the sandwiches in the original figure. There is a Pyplot function that inverts the `y` axis; see if you can find it and use it to reverse the order of the sandwich list.
```
# Solution goes here
```
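Here is one possible sketch; `invert_yaxis` is the axis-flipping function the exercise hints at:
```
plt.plot(boston_price_list, name_list, 'o', color='C3', label='Boston')
plt.plot(london_price_list, name_list, 's', color='C0', label='London')
plt.hlines(name_list, london_price_list, boston_price_list)
plt.gca().invert_yaxis()    # put the first sandwich at the top, as in the original figure
plt.xlabel('Price in USD')
plt.title('Pret a Manger prices in Boston and London')
plt.legend();
```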
## Zipf's law
In the previous chapter we downloaded *War and Peace* from Project Gutenberg and counted the number of lines and words.
Then we used a dictionary to count the number of unique words and the number of times each one appears.
Now we'll use those results to generate a "Zipf plot", which shows the frequency of the words on the `y` axis, ordered from the most common word to the least.
First, let's download the book again.
When you run the following cell, it checks to see whether you already have a file named `2600-0.txt`, which is the name of the file that contains the text of *War and Peace*.
If not, it copies the file from Project Gutenberg to your computer.
```
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://www.gutenberg.org/files/2600/2600-0.txt')
```
In the previous chapter, we looped through the book and made a string that contains all punctuation characters.
Here are the results, which we will need again.
```
all_punctuation = ',.-:[#]*/“’—‘!?”;()%@'
```
And here's a solution to one of the previous exercises. It loops through the book and makes a dictionary that maps from each word to the number of times it appears.
```
first_line = "CHAPTER I\n"
last_line = ("End of the Project Gutenberg EBook of " +
"War and Peace, by Leo Tolstoy\n")
fp = open('2600-0.txt')
for line in fp:
if line == first_line:
break
unique_words = {}
for line in fp:
if line == last_line:
break
for word in line.split():
word = word.lower()
word = word.strip(all_punctuation)
if word in unique_words:
unique_words[word] += 1
else:
unique_words[word] = 1
```
## Frequencies and ranks
In this section we'll test Zipf's law, which states that
> given some corpus of natural language utterances, the frequency of any word is inversely proportional to its rank in the frequency table. Thus the most frequent word will occur approximately twice as often as the second most frequent word, three times as often as the third most frequent word, etc.
See <https://en.wikipedia.org/wiki/Zipfs_law>.
To see if this law holds for the words in *War and Peace*, we'll make a plot that shows:
* The frequency of each word on the `y` axis, and
* The rank of each word on the `x` axis, where the rank of the most frequent word is 1, the rank of the second most common word is 2, etc.
In `unique_words`, the keys are words and the values are their frequencies. We can use the `values` function to get the values from the dictionary. The result has the type `dict_values`:
```
freqs = unique_words.values()
type(freqs)
```
Before we plot them, we have to sort them, but the `sort` function doesn't work with `dict_values`.
Try this to see what happens:
```
freqs.sort()
```
We can use `list` to make a list of frequencies:
```
freqs = list(unique_words.values())
type(freqs)
```
And now we can use `sort`. By default it sorts in ascending order, but we can pass a keyword argument to reverse the order.
```
freqs.sort(reverse=True)
```
Now, for the ranks, we need a sequence that counts from 1 to `n`, where `n` is the number of elements in `freqs`. We can use the `range` function, which returns a value with type `range`.
As a small example, here's the range from 1 to 5.
```
range(1, 5)
```
However, there's a catch. If we use the range to make a list, we see that "the range from 1 to 5" includes 1, but it doesn't include 5.
```
list(range(1, 5))
```
That might seem strange, but it is often more convenient to use `range` when it is defined this way, rather than what might seem like the more natural way (see <https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html>).
Anyway, we can get what we want by increasing the second argument by one:
```
list(range(1, 6))
```
So, finally, we can make a range that represents the ranks from `1` to `n`:
```
n = len(freqs)
ranks = range(1, n+1)
ranks
```
And now we can plot the frequencies versus the ranks:
```
plt.plot(ranks, freqs)
plt.xlabel('Rank')
plt.ylabel('Frequency')
plt.title("War and Peace and Zipf's law");
```
## Logarithmic scales
The few most common words are very common, but the great majority of words are rare. So that's consistent with Zipf's law, but Zipf's law is more specific. It claims that the frequencies should be inversely proportional to the ranks. If that's true, we can write:
$f = k / r$
where $r$ is the rank of a word, $f$ is its frequency, and $k$ is an unknown constant of proportionality. If we take the log of both sides, we get this:
$\log f = \log k - \log r$
This equation implies that if we plot $f$ versus $r$ on a log-log scale, we expect to see a straight line with intercept at $\log k$ and slope -1.
We can use `plt.xscale` to plot the `x` axis on a log scale.
```
plt.plot(ranks, freqs)
plt.xlabel('Rank')
plt.ylabel('Frequency')
plt.title("War and Peace and Zipf's law")
plt.xscale('log')
```
And `plt.yscale` to plot the `y` axis on a log scale.
```
plt.plot(ranks, freqs)
plt.xlabel('Rank')
plt.ylabel('Frequency')
plt.title("War and Peace and Zipf's law")
plt.xscale('log')
plt.yscale('log')
```
The result is not quite a straight line, but it is close. We can get a sense of the slope by connecting the end points with a line.
I'll select the first and last elements from `ranks` and call them `xs`.
```
xs = ranks[0], ranks[-1]
xs
```
And the first and last elements from `freqs`, which I'll call `ys`.
```
ys = freqs[0], freqs[-1]
ys
```
And plot a line between them.
```
plt.plot(xs, ys, color='gray')
plt.plot(ranks, freqs)
plt.xlabel('Rank')
plt.ylabel('Frequency')
plt.title("War and Peace and Zipf's law")
plt.xscale('log')
plt.yscale('log')
```
The slope of this line is the "rise over run", that is, the difference on the `y` axis divided by the difference on the `x` axis.
We can compute the rise using `np.log10` to compute the log base 10 of the first and last values:
```
np.log10(ys)
```
Then we can use `np.diff` to compute the difference between the elements:
```
rise = np.diff(np.log10(ys))
rise
```
**Exercise:** Use `log10` and `diff` to compute the run, that is, the difference on the `x` axis. Then divide the rise by the run to get the slope of the grey line.
Is it close to -1, as Zipf's law predicts?
```
# Solution goes here
# Solution goes here
```
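A possible solution sketch, not the book's official answer:
```
# run = the difference on the x axis, also on a log scale
run = np.diff(np.log10(xs))
slope = rise / run
slope    # Zipf's law predicts a value close to -1
```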
# Module 3: Model Training
**This notebook uses the feature set extracted by `module-2` to create an XGBoost-based machine learning model for binary classification**
**Note:** Please set kernel to `Python 3 (Data Science)` and select instance to `ml.t3.medium`
---
## Contents
1. [Background](#Background)
1. [Setup](#Setup)
1. [Load transformed feature set](#Load-transformed-feature-set)
1. [Split data](#Split-data)
1. [Train a model using SageMaker built-in XgBoost algorithm](#Train-a-model-using-SageMaker-built-in-XgBoost-algorithm)
1. [Real time inference using the deployed endpoint](#Real-time-inference-using-the-deployed-endpoint)
# Background
In this notebook, we demonstrate how to use the feature set derived in `Module-2` to create a machine learning model that predicts whether a customer will reorder a product, based on historical records. Since the problem is supervised binary classification, we will use the SageMaker built-in XGBoost algorithm to build the classifier. Once the model is trained, we will also deploy it as a SageMaker endpoint for real-time inference.
# Setup
```
from sagemaker.serializers import CSVSerializer
from sagemaker.inputs import TrainingInput
from sagemaker.predictor import Predictor
from sagemaker import get_execution_role
import pandas as pd
import numpy as np
import sagemaker
import logging
import boto3
import json
import os
logger = logging.getLogger('__name__')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
```
#### Essentials
```
sagemaker_execution_role = get_execution_role()
logger.info(f'Role = {sagemaker_execution_role}')
session = boto3.Session()
sagemaker_session = sagemaker.Session()
default_bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker-featurestore-workshop'
s3 = session.resource('s3')
```
# Load transformed feature set
```
df = pd.read_csv('.././data/train/transformed.csv')
df.head(5)
df.shape
```
Move the `is_reordered` column to the front, since our training algorithm `XGBoost` expects the target column to be the first column.
```
first_column = df.pop('is_reordered')
df.insert(0, 'is_reordered', first_column)
df.head()
```
# Split data
We will shuffle the whole dataset first (df.sample(frac=1, random_state=123)) and then split our data set into the following parts:
* 70% - train set,
* 20% - validation set,
* 10% - test set
**Note:** In the code below, `np.split` cuts the shuffled DataFrame at the 70% and 90% marks, so the first piece is the train set (70%), the middle piece is the validation set (90% - 70% = 20%), and the last piece is the test set (100% - 90% = 10%). A tiny illustration follows.
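As a tiny illustration (an addition, not part of the original module), here is how `np.split` with those two cut points behaves on a 10-element array:
```
import numpy as np
demo = np.arange(10)
np.split(demo, [int(.7 * len(demo)), int(.9 * len(demo))])
# -> [array([0, 1, 2, 3, 4, 5, 6]), array([7, 8]), array([9])], i.e. 70% / 20% / 10%
```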
```
train_df, validation_df, test_df = np.split(df.sample(frac=1, random_state=123), [int(.7*len(df)), int(.9*len(df))])
train_df.shape
validation_df.shape
test_df.shape
```
Save the split datasets to local disk
```
train_df.to_csv('../data/train/train.csv', index=False)
validation_df.to_csv('../data/validation/validation.csv', index=False)
test_df.to_csv('../data/test/test.csv', index=False)
```
Copy the datasets from local disk to S3
```
s3.Bucket(default_bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('.././data/train/train.csv')
s3.Bucket(default_bucket).Object(os.path.join(prefix, 'validation/validation.csv')).upload_file('.././data/validation/validation.csv')
s3.Bucket(default_bucket).Object(os.path.join(prefix, 'test/test.csv')).upload_file('.././data/test/test.csv')
```
Create pointers (`TrainingInput` objects) to the uploaded files
```
train_set_location = 's3://{}/{}/train/'.format(default_bucket, prefix)
validation_set_location = 's3://{}/{}/validation/'.format(default_bucket, prefix)
test_set_location = 's3://{}/{}/test/'.format(default_bucket, prefix)
train_set_pointer = TrainingInput(s3_data=train_set_location, content_type='csv')
validation_set_pointer = TrainingInput(s3_data=validation_set_location, content_type='csv')
test_set_pointer = TrainingInput(s3_data=test_set_location, content_type='csv')
print(json.dumps(train_set_pointer.__dict__, indent=2))
```
# Train a model using SageMaker built-in XgBoost algorithm
```
container_uri = sagemaker.image_uris.retrieve(region=session.region_name,
framework='xgboost',
version='1.0-1',
image_scope='training')
xgb = sagemaker.estimator.Estimator(image_uri=container_uri,
role=sagemaker_execution_role,
instance_count=2,
instance_type='ml.m5.xlarge',
output_path='s3://{}/{}/model-artifacts'.format(default_bucket, prefix),
sagemaker_session=sagemaker_session,
base_job_name='reorder-classifier')
xgb.set_hyperparameters(objective='binary:logistic',
num_round=100)
xgb.fit({'train': train_set_pointer, 'validation': validation_set_pointer})
```
#### Saving Training Job Information
```
# Saving training job information to be used in the ML lineage module
training_job_info = xgb.latest_training_job.describe()
if training_job_info is not None:
training_jobName = training_job_info["TrainingJobName"]
%store training_jobName
```
#### Host the trained XGBoost model as a SageMaker Endpoint
**Note:** The deployment usually takes ~10 mins - good time to take a coffee break :)
```
xgb_predictor = xgb.deploy(initial_instance_count=2,
instance_type='ml.m5.xlarge')
```
# Real time inference using the deployed endpoint
```
csv_serializer = CSVSerializer()
endpoint_name = xgb_predictor.endpoint_name
%store endpoint_name
predictor = Predictor(endpoint_name=endpoint_name,
serializer=csv_serializer)
test_df = pd.read_csv('.././data/test/test.csv')
record = test_df.sample(1)
record
X = record.values[0]
payload = X[1:]
payload
%%time
predicted_class_prob = predictor.predict(payload).decode('utf-8')
if float(predicted_class_prob) < 0.5:
logger.info('Prediction (y) = Will not reorder')
else:
logger.info('Prediction (y) = Will reorder')
```
## Homework Assignment 7
```
import numpy as np
from sklearn import model_selection
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
%matplotlib inline
X, y = load_iris(return_X_y=True)
# For clarity, take only the first two features (the dataset has 4 in total)
X = X[:, :2]
```
Let's split the data into training and test sets.
```
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=1)
cmap = ListedColormap(['red', 'green', 'blue'])
plt.figure(figsize=(7, 7))
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap)
```
We will use the Euclidean metric. Let's implement a function to compute it.
```
def e_metrics(x1, x2):
return np.sqrt(np.sum((x2-x1)**2))
```
Let's implement the k-nearest-neighbors search algorithm.
```
def knn(x_train, y_train, x_test, k):
    answers = []
    for x in x_test:
        test_distances = []
        for i in range(len(x_train)):
            # compute the distance from the object being classified
            # to an object of the training set
            distance = e_metrics(x, x_train[i])
            # store the distance and the label of the training object
            test_distances.append((distance, y_train[i]))
        # create a dictionary with all possible classes
        classes = {class_item: 0 for class_item in set(y_train)}
        # sort the list and, among the first k elements, accumulate a weight per class
        for d in sorted(test_distances)[0:k]:
            classes[d[1]] += d[0] * k
        # append the class with the largest accumulated weight
        answers.append(sorted(classes, key=classes.get)[-1])
    return answers
```
Let's write a function to compute accuracy.
```
def accuracy(pred, y):
return (sum(pred == y) / len(y))
```
Let's check how the algorithm performs for different values of k.
```
k = 5
y_pred = knn(X_train, y_train, X_test, k)
print(f'Algorithm accuracy for k = {k}: {accuracy(y_pred, y_test):.3f}')
```
Let's evaluate the accuracy for a range of k values.
```
for n in range(1, 11):
    y_pred = knn(X_train, y_train, X_test, n)
    print(f'Algorithm accuracy for k = {n}: {accuracy(y_pred, y_test):.3f}')
```
### Oddly, the algorithm works better even when the class weight is added by simply multiplying the distance by k, since multiplying a smaller distance (which is better) by a constant yields a smaller contribution.
```
def get_graph(X_train, y_train, k):
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF'])
    h = .02
    # compute the plot limits
    x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
    y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # get predictions for every point of the grid
    Z = knn(X_train, y_train, np.c_[xx.ravel(), yy.ravel()], k)
    # draw the plot
    Z = np.array(Z).reshape(xx.shape)
    plt.figure(figsize=(7,7))
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # add the training set to the plot
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cmap)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title(f"Three-class kNN classification for k = {k}")
    plt.show()
get_graph(X_train, y_train, k)
def knn_2(x_train, y_train, x_test, k):
    answers = []
    for x in x_test:
        test_distances = []
        for i in range(len(x_train)):
            # compute the distance from the object being classified
            # to an object of the training set
            distance = e_metrics(x, x_train[i])
            # store the distance and the label of the training object
            test_distances.append((distance, y_train[i]))
        # create a dictionary with all possible classes
        classes = {class_item: 0 for class_item in set(y_train)}
        # sort the list and, among the first k elements, accumulate an inverse-distance weight per class
        for d in sorted(test_distances)[0:k]:
            if d[0] == 0:
                classes[d[1]] += 100000
            else:
                classes[d[1]] += 1/d[0] * k
        # append the class with the largest accumulated weight
        answers.append(sorted(classes, key=classes.get)[-1])
    return answers
for n in range(1, 11):
    y_pred = knn_2(X_train, y_train, X_test, n)
    print(f'Algorithm accuracy for k = {n}: {accuracy(y_pred, y_test):.3f}')
```
**If instead we add the inverse distance multiplied by k, the algorithm shows worse results.**
## Let's test the algorithm on a different pair of features from the iris dataset.
```
X, y = load_iris(return_X_y=True)
# This time take the second and third features (the dataset has 4 in total)
X = X[:, 1:3]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=1)
cmap = ListedColormap(['red', 'green', 'blue'])
plt.figure(figsize=(7, 7))
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap)
for n in range(1, 11):
    y_pred = knn(X_train, y_train, X_test, n)
    print(f'Algorithm accuracy for k = {n}: {accuracy(y_pred, y_test):.3f}')
for n in range(1, 11):
    y_pred = knn_2(X_train, y_train, X_test, n)
    print(f'Algorithm accuracy for k = {n}: {accuracy(y_pred, y_test):.3f}')
get_graph(X_train, y_train, k)
```
# ORF recognition by LSTM
LSTM was incapable of ORF recognition on 1000 bp sequences (notebook 101) but capable on 100 bp sequences (notebook 102). Try sizes in between.
```
import time
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
PC_SEQUENCES=2000 # how many protein-coding sequences
NC_SEQUENCES=2000 # how many non-coding sequences
PC_TESTS=1000
NC_TESTS=1000
BASES=1000 # how long is each sequence
ALPHABET=4 # how many different letters are possible
INPUT_SHAPE_2D = (BASES,ALPHABET,1) # Conv2D needs 3D inputs
INPUT_SHAPE = (BASES,ALPHABET) # Conv1D needs 2D inputs
NEURONS = 32
#DROP_RATE = 0.2
EPOCHS=50 # how many times to train on all the data
SPLITS=5 # SPLITS=3 means train on 2/3 and validate on 1/3
FOLDS=5 # train the model this many times (range 1 to SPLITS)
import sys
try:
from google.colab import drive
IN_COLAB = True
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
#drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py')
with open('RNA_gen.py', 'w') as f:
f.write(r.text)
from RNA_gen import *
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(r.text)
from RNA_describe import *
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_prep.py')
with open('RNA_prep.py', 'w') as f:
f.write(r.text)
from RNA_prep import *
except:
print("CoLab not working. On my PC, use relative paths.")
IN_COLAB = False
DATAPATH='data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_gen import *
from SimTools.RNA_describe import *
from SimTools.RNA_prep import *
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
if not assert_imported_RNA_gen():
print("ERROR: Cannot use RNA_gen.")
if not assert_imported_RNA_prep():
print("ERROR: Cannot use RNA_prep.")
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import LSTM
from keras.losses import BinaryCrossentropy
# tf.keras.losses.BinaryCrossentropy
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
# Use code from our SimTools library.
def make_generators(seq_len):
pcgen = Collection_Generator()
pcgen.get_len_oracle().set_mean(seq_len)
trans = Transcript_Oracle()
trans.set_orf_len_mean(50) #### Very short ORFs!
pcgen.set_seq_oracle(trans)
ncgen = Collection_Generator()
ncgen.get_len_oracle().set_mean(seq_len)
return pcgen,ncgen
def get_the_facts(seqs):
rd = RNA_describer()
facts = rd.get_three_lengths(seqs)
facts_ary = np.asarray(facts) # 5000 rows, 3 columns
print("Facts array:",type(facts_ary))
print("Facts array:",facts_ary.shape)
# Get the mean of each column
mean_5utr, mean_orf, mean_3utr = np.mean(facts_ary,axis=0)
std_5utr, std_orf, std_3utr = np.std(facts_ary,axis=0)
print("mean 5' UTR length:",int(mean_5utr),"+/-",int(std_5utr))
print("mean ORF length:",int(mean_orf), "+/-",int(std_orf))
print("mean 3' UTR length:",int(mean_3utr),"+/-",int(std_3utr))
pc_sim,nc_sim = make_generators(BASES)
pc_train = pc_sim.get_sequences(PC_SEQUENCES)
nc_train = nc_sim.get_sequences(NC_SEQUENCES)
print("Train on",len(pc_train),"PC seqs")
get_the_facts(pc_train)
print("Train on",len(nc_train),"NC seqs")
get_the_facts(nc_train)
# Use code from our SimTools library.
X,y = prepare_inputs_len_x_alphabet(pc_train,nc_train,ALPHABET) # shuffles
print("Data ready.")
def make_DNN():
print("make_DNN")
print("input shape:",INPUT_SHAPE)
dnn = Sequential()
#dnn.add(Embedding(input_dim=ALPHABET, output_dim=ALPHABET))
#VOCABULARY_SIZE, EMBED_DIMEN, input_length=1000, input_length=1000, mask_zero=True)
#input_dim=[None,VOCABULARY_SIZE], output_dim=EMBED_DIMEN, mask_zero=True)
dnn.add(LSTM(NEURONS,return_sequences=True,input_shape=INPUT_SHAPE))
dnn.add(LSTM(NEURONS,return_sequences=False))
dnn.add(Dense(NEURONS,activation="sigmoid",dtype=np.float32))
dnn.add(Dense(1,activation="sigmoid",dtype=np.float32))
dnn.compile(optimizer='adam',
loss=BinaryCrossentropy(from_logits=False),
metrics=['accuracy']) # add to default metrics=loss
dnn.build() # input_shape=INPUT_SHAPE)
#ln_rate = tf.keras.optimizers.Adam(learning_rate = LN_RATE)
#bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
#model.compile(loss=bc, optimizer=ln_rate, metrics=["accuracy"])
return dnn
model = make_DNN()
print(model.summary())
from keras.callbacks import ModelCheckpoint
def do_cross_validation(X,y):
cv_scores = []
fold=0
mycallbacks = [ModelCheckpoint(
filepath=MODELPATH, save_best_only=True,
monitor='val_accuracy', mode='max')]
splitter = KFold(n_splits=SPLITS) # this does not shuffle
for train_index,valid_index in splitter.split(X):
if fold < FOLDS:
fold += 1
X_train=X[train_index] # inputs for training
y_train=y[train_index] # labels for training
X_valid=X[valid_index] # inputs for validation
y_valid=y[valid_index] # labels for validation
print("MODEL")
# Call constructor on each CV. Else, continually improves the same model.
model = make_DNN()
print("FIT") # model.fit() implements learning
start_time=time.time()
history=model.fit(X_train, y_train,
epochs=EPOCHS,
verbose=1, # ascii art while learning
callbacks=mycallbacks, # called at end of each epoch
validation_data=(X_valid,y_valid))
end_time=time.time()
elapsed_time=(end_time-start_time)
print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
# print(history.history.keys()) # all these keys will be shown in figure
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale
plt.show()
do_cross_validation(X,y)
from keras.models import load_model
pc_sim.set_reproducible(True)
nc_sim.set_reproducible(True)
pc_test = pc_sim.get_sequences(PC_TESTS)
nc_test = nc_sim.get_sequences(NC_TESTS)
X,y = prepare_inputs_len_x_alphabet(pc_test,nc_test,ALPHABET)
best_model=load_model(MODELPATH)
scores = best_model.evaluate(X, y, verbose=0)
print("The best model parameters were saved during cross-validation.")
print("Best was defined as maximum validation accuracy at end of any epoch.")
print("Now re-load the best model and test it on previously unseen data.")
print("Test on",len(pc_test),"PC seqs")
print("Test on",len(nc_test),"NC seqs")
print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
ns_probs = [0 for _ in range(len(y))]
bm_probs = best_model.predict(X)
ns_auc = roc_auc_score(y, ns_probs)
bm_auc = roc_auc_score(y, bm_probs)
ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
print("%s: %.2f%%" %('AUC',bm_auc))
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
```
ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
print("%s: %.2f%%" %('AUC',bm_auc))
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
Course link: <https://class.coursera.org/neuralnets-2012-001/lecture/>
Contents:
[Why do we need machine learning](#1)
[What are neural networks?](#14)
[Lecture 1c - Some simple models of neurons](#21)
[Lecture 1d - A simple example of learning](#30)
[Lecture 1e - Three types of learning](#42)
##### 1

##### 2

So how is fraud detection actually done, and how is it approached with machine learning versus with neural networks?
##### 3

##### 4

##### 5

He even specifically uses fruit flies as an example. Hinton pronounces MNIST roughly as "em-nist"; I won't bother with the phonetic transcription.
##### 6

##### 7

We now have neural nets with approaching 100 million parameters in them that can recognize 1000 differ...
##### 8

##### 9

##### 10

##### 11

I don't fully understand this part, but it isn't the main point anyway. Why did it make me think of HMMs and the Viterbi algorithm? Because it is about finding the most likely sequence.
##### 12

Uses many layers of binary neurons. A binary neuron presumably means a neuron whose output takes only two classes.
##### 13

##### 14

This video is about biological neural networks.
##### 15

##### 16

Terminology:
axon
dendritic tree
axon hillock
synapse
Reference: [Baidu Baike](http://baike.baidu.com/link?url=D74u2pbY7f0WO2Au0jIieXAs0R1vLUtrOA1lJQ1CPkXbEb5vQdSITDjad6brtuQacVy18OgXTOHNEWnftYCdLM1Fu0InsA3p5a-iR_pmgfe)
The axon sends signals, the dendritic tree collects signals, the synapse is where an axon passes a signal to a dendrite, and my understanding is that the axon hillock is where the signal is generated on the axon.
##### 17

This is high-school biology material; it just describes the signal transmission process.
##### 18

Synapses are adaptive; learning is the process of modifying the synapses.
##### 19

There are $10 ^ {15}$ weights in total.

##### 20

This brings up the frequently cited example: if a brain region that originally handled hearing has its input from the ears cut off and is instead fed visual input, it eventually develops like visual cortex. The description may not be perfectly accurate, but that is roughly the idea.
##### 21

##### 22

This drifts into the philosophy of the scientific method, but it does make a lot of sense.
##### 23

##### 24

##### 25

##### 26

##### 27

##### 28



Get familiar with these values.
##### 29

Note the difference from the sigmoid discussed earlier.

##### 30

##### 31

##### 32

##### 33

The weights are random at this point; training begins in the following slides.
##### 34

##### 35

##### 36

##### 37

##### 38

##### 39

The lower halves of the 7 and the 9 are not distinctive, which suggests the trained model relies mainly on the upper halves to distinguish them.

##### 40

##### 41

No single template can fit the 2 in the green box while not fitting the 3 in the red box.
##### 42

The first half of the course covers supervised learning and the second half covers unsupervised learning; reinforcement learning will not be covered.
##### 43

##### 44


##### 45

##### 46

Here it is mentioned that reinforcement learning typically uses only tens to thousands of parameters, because a scalar reward does not supply much information.
##### 47

##### 48

PCA is a linear method; it assumes there is a manifold, and that this manifold happens to be a plane in the high-dimensional space?
[](https://mybinder.org/v2/gh/stammler/simframe/HEAD?labpath=examples%2Fexample_coupled_oscillators.ipynb)
# Example: Coupled Oscillators
In this example we'll have a look at the coupled oscillations of two bodies with masses $m_i$ that are connected to each other and to two walls via three springs. The goal is to calculate the time evolution of this system when it is not in equilibrium. The springs have spring constants $k_i$ and rest lengths $l_i$ when no forces are acting on them. The distance between the walls is $L$.
```
k1, l1 = 10., 6.
k2, l2 = 20., 6.
k3, l3 = 10., 6.
m1, m2 = 1., 1.
L = 15.
```
Springs connected in series have a resulting spring constant $K$, whose reciprocal is the sum of the reciprocals of the individual spring constants.
```
Kinv = 1./k1 + 1./k2 + 1./k3
K = 1./Kinv
```
The force exerted by a spring is given by
$\vec{F} = -k\cdot\vec{d}$
where $\vec{d}$ is the displacement vector from the spring's equilibrium position. The system is easier to solve in terms of the time evolution of the displacements $\vec{d}_i$ of the bodies. To convert the result back into actual coordinates, we first have to find the equilibrium positions $\vec{x}_i$ of the bodies.
If the system is in equilibrium, the forces acting on each individual spring are identical.
```
F = - ( L - ( l1 + l2 + l3 ) ) * K
```
From this we can calculate the equilibrium positions.
```
x1 = l1 - F/k1
x2 = F/k3 + L - l3
```
In equilibrium our system looks as follows.
```
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
def getspring(l1, l2, bw):
l1 = float(l1)
l2 = float(l2)
bw = float(bw)
L = l2 - l1
d = L/6.
x = np.array([l1, d, d/2., d, d, d, d/2., d], dtype=float)
for i in range(1, 8):
x[i] += x[i-1]
y = np.array([0., 0., 2.*bw, -2.*bw, 2.*bw, -2*bw, 0., 0.], dtype=float)
return x, y
def plot_system(bw, x1, x2, L):
fig, ax = plt.subplots(dpi=150)
ax.axis("off")
ax.set_aspect(1.)
rectl = patches.Rectangle((-bw, -4.*bw), bw, 8.*bw, linewidth=1, edgecolor="#000000", facecolor="#dddddd", hatch="//")
ax.add_patch(rectl)
rectr = patches.Rectangle((L, -4.*bw), bw, 8.*bw, linewidth=1, edgecolor="#000000", facecolor="#dddddd", hatch="//")
ax.add_patch(rectr)
body1 = patches.Circle((x1, 0), bw, linewidth=1, edgecolor="#000000", facecolor="C1")
ax.add_patch(body1)
body2 = patches.Circle((x2, 0), bw, linewidth=1, edgecolor="#000000", facecolor="C9")
ax.add_patch(body2)
s1x, s1y = getspring(0., x1-bw, bw)
ax.plot(s1x, s1y, c="#000000", lw=1)
s2x, s2y = getspring(x1+bw, x2-bw, bw)
ax.plot(s2x, s2y, c="#000000", lw=1)
s3x, s3y = getspring(x2+bw, L, bw)
ax.plot(s3x, s3y, c="#000000", lw=1)
ax.set_xlim(-2.*bw, L+2.*bw)
ax.set_ylim(-3., 3.)
fig.tight_layout()
return fig, ax
bw = 0.5
fig, ax = plot_system(bw, x1, x2, L)
ax.text((x1+0.)/2., 3*bw, "$k_1$", verticalalignment="center", horizontalalignment="center")
ax.text((x2+x1)/2., 3*bw, "$k_2$", verticalalignment="center", horizontalalignment="center")
ax.text((L +x2)/2., 3*bw, "$k_3$", verticalalignment="center", horizontalalignment="center")
ax.text(x1, 0., "$m_1$", verticalalignment="center", horizontalalignment="center")
ax.text(x2, 0., "$m_2$", verticalalignment="center", horizontalalignment="center")
ax.annotate(text='', xy=(0., -4.*bw), xytext=(L, -4.*bw), arrowprops=dict(arrowstyle='<->', lw=1))
ax.text(L/2, -5.*bw, "$L$", verticalalignment="center", horizontalalignment="center")
plt.show()
```
We now want to displace mass $m_1$ from its equilibrium position and calculate the time evolution of the whole system.
The force acting on $m_1$ is given by
$F_1 = m_1\dot{v_1} = -(k_1 + k_2) \cdot d_1 + k_2 \cdot d_2$
Vector notation is omitted since the problem is one-dimensional. The change in the displacement $d_1$ is given by
$\dot{d_1} = v_1$
Similarly, for the second body,
$F_2 = m_2\dot{v_2} = -(k_2 + k_3) \cdot d_2 + k_2 \cdot d_1$
$\dot{d_2} = v_2$
This is a system of coupled differential equations that can be written in matrix form
$\begin{pmatrix} \dot{d_1} \\ \dot{d_2} \\ \dot{v_1} \\ \dot{v_2} \end{pmatrix} = \begin{pmatrix} 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \\ -\frac{k_1+k_2}{m_1} & \frac{k_2}{m_1} & 0 & 0 \\ \frac{k_2}{m_2} & -\frac{k_2+k_3}{m_2} & 0 & 0 \end{pmatrix} \cdot \begin{pmatrix} d_1 \\ d_2 \\ v_1 \\ v_2 \end{pmatrix}$
or short
$\frac{\mathrm{d}}{\mathrm{d}t} \vec{Y} = \mathbf{J} \cdot \vec{Y}$
with the state vector $\vec{Y}$ and the Jacobian $\mathbf{J}$.
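Before building the frame, we can sanity-check the model: the eigenvalues of $\mathbf{J}$ come in purely imaginary pairs $\pm i\omega$, and the $\omega$ are the angular frequencies of the two normal modes. The following is only a small optional check using plain NumPy and the constants defined above.
```
# Optional sanity check: normal-mode frequencies from the eigenvalues of J.
# Uses only the constants k1, k2, k3, m1, m2 defined at the top of the notebook.
import numpy as np

J_check = np.array([[0., 0., 1., 0.],
                    [0., 0., 0., 1.],
                    [-(k1 + k2)/m1, k2/m1, 0., 0.],
                    [k2/m2, -(k2 + k3)/m2, 0., 0.]])

# The eigenvalues are +/- i*omega for the two normal modes.
omega = np.abs(np.linalg.eigvals(J_check).imag)
print("Normal-mode angular frequencies:", np.unique(np.round(omega, 6)))
```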
We can now start setting up the frame.
```
from simframe import Frame
sim = Frame(description="Coupled Oscillators")
import numpy as np
Y = np.array([-3., 0., 0., 0.])
sim.addfield("Y", Y, description="State vector")
```
In this configuration mass $m_1$ is displaced by $3$ to the left while $m_2$ is in its equilibrium position. Both bodies are at rest.
We set up the time as integration variable.
```
sim.addintegrationvariable("t", 0., description="Time")
def dt(sim):
return 0.1
sim.t.updater = dt
```
We define the snapshots according to the frames per second and the maximum simulation time that we want to have in the animation later.
```
fps = 30.
t_max = 15.
sim.t.snapshots = np.arange(1./fps, t_max, 1./fps)
```
In principle this would be enough to run the simulation. But for convenience we set up a few more fields and groups.
```
sim.addgroup("b1", description="Body 1")
sim.addgroup("b2", description="Body 2")
# Body 1
sim.b1.addfield("m" , m1, description="Mass", constant=True)
sim.b1.addfield("d" , 0., description="Displacement")
sim.b1.addfield("x" , 0., description="Position")
sim.b1.addfield("x0", x1, description="Equilibrium Position", constant=True)
sim.b1.addfield("v" , 0., description="Velocity")
# Body 2
sim.b2.addfield("m" , m2, description="Mass", constant=True)
sim.b2.addfield("d" , 0., description="Displacement")
sim.b2.addfield("x" , 0., description="Position")
sim.b2.addfield("x0", x2, description="Equilibrium Position", constant=True)
sim.b2.addfield("v" , 0., description="Velocity")
```
These fields need to be updated from the state vector.
```
# Body 1
def update_d1(sim):
return sim.Y[0]
sim.b1.d.updater = update_d1
def update_v1(sim):
return sim.Y[2]
sim.b1.v.updater = update_v1
def update_x1(sim):
return sim.b1.x0 + sim.b1.d
sim.b1.x.updater = update_x1
# Body 2
def update_d2(sim):
return sim.Y[1]
sim.b2.d.updater = update_d2
def update_v2(sim):
return sim.Y[3]
sim.b2.v.updater = update_v2
def update_x2(sim):
return sim.b2.x0 + sim.b2.d
sim.b2.x.updater = update_x2
```
And we are adding more groups for the spring parameters.
```
sim.addgroup("s1", description="Spring 1")
sim.addgroup("s2", description="Spring 2")
sim.addgroup("s3", description="Spring 3")
sim.s1.addfield("k", k1, description="Spring Constant", constant=True)
sim.s1.addfield("l", l1, description="Length", constant=True)
sim.s2.addfield("k", k2, description="Spring Constant", constant=True)
sim.s2.addfield("l", l2, description="Length", constant=True)
sim.s3.addfield("k", k3, description="Spring Constant", constant=True)
sim.s3.addfield("l", l3, description="Length", constant=True)
```
We now have to tell `simframe` in what order to update the fields.
```
# The groups for the bodies. The order does not matter
sim.updater = ["b1", "b2"]
# The fields in the groups. Displacement has to be updated before position
sim.b1.updater = ["d", "v", "x"]
sim.b2.updater = ["d", "v", "x"]
```
We can now fill the fields with their initial conditions from the state vector.
```
sim.update()
```
The initial state of the system looks as follows:
```
fig, ax = plot_system(0.5, sim.b1.x, sim.b2.x, L)
circ = patches.Circle((sim.b1.x0, 0.), 0.5, linewidth=1, edgecolor="#000000", facecolor="C1", alpha=0.15)
ax.add_patch(circ)
plt.show()
```
**Printing the complete frame structure**
```
sim.toc
```
**Setting up the Jacobian**
For implicit schemes we have to calculate the Jacobian. Since in this case the Jacobian is constant with time, we can define it outside of the frame object.
```
jac = np.array([[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.],
[-(k1+k2)/m1, k2/m1, 0., 0.],
[ k2/m2, -(k2+k3)/m2, 0., 0.]], dtype=float)
jac
def jac_impl(sim, x):
return jac
sim.Y.jacobinator = jac_impl
```
**Setting up the Integrator**
We can now set up the integrator just as in the previous examples.
```
from simframe import Integrator
from simframe import Instruction
from simframe import schemes
sim.integrator = Integrator(sim.t)
sim.integrator.instructions = [Instruction(schemes.impl_1_euler_direct, sim.Y)]
```
**Setting up the Writer**
We also have to set up the writer. In this case we don't want to write data files, so we simply write the data into a namespace.
```
from simframe import writers
sim.writer = writers.namespacewriter()
sim.writer.verbosity = 0
```
**Starting the simulation**
```
sim.run()
```
**Reading data**
```
data = sim.writer.read.all()
def plot_oszillations(data):
fig, ax = plt.subplots(dpi=150)
ax.plot(data.b1.x, data.t, c="C1")
ax.plot(data.b2.x, data.t, c="C9")
ax.axvline(data.b1.x0[0], c="#000000", alpha=0.5, lw=1)
ax.axvline(data.b2.x0[0], c="#000000", alpha=0.5, lw=1)
ax.set_xlim(0, L)
ax.set_ylim(data.t[-1], data.t[0])
ax.set_xlabel("Position")
ax.set_ylabel("Time")
fig.tight_layout()
plt.show()
plot_oszillations(data)
from matplotlib import animation
from IPython.display import HTML
def plot_animation(data, bw):
fig, ax = plt.subplots()
ax.axis("off")
ax.set_aspect(1.)
l1, = ax.plot(data.b1.x, data.t-data.t[0], c="C1", lw=1, zorder=-1)
l2, = ax.plot(data.b2.x, data.t-data.t[0], c="C9", lw=1, zorder=-1)
rectl = patches.Rectangle((-bw, -4.*bw), bw, 8.*bw, linewidth=1, edgecolor="#000000", facecolor="#dddddd", hatch="//")
ax.add_patch(rectl)
rectr = patches.Rectangle((L, -4.*bw), bw, 8.*bw, linewidth=1, edgecolor="#000000", facecolor="#dddddd", hatch="//")
ax.add_patch(rectr)
ax.set_xlim(-2.*bw, L+2.*bw)
ax.set_ylim(-5., 5.)
b1 = patches.Circle((data.b1.x[0], 0), bw, linewidth=1, edgecolor="#000000", facecolor="C1")
ax.add_patch(b1)
b2 = patches.Circle((data.b2.x[0], 0), bw, linewidth=1, edgecolor="#000000", facecolor="C9")
ax.add_patch(b2)
x, y = getspring(0., data.b1.x[0]-bw, bw)
s1, = ax.plot(x, y, c="#000000", lw=1)
x, y = getspring(data.b1.x[0]+bw, data.b2.x[0]-bw, bw)
s2, = ax.plot(x, y, c="#000000", lw=1)
x, y = getspring(data.b2.x[0]+bw, L, bw)
s3, = ax.plot(x, y, c="#000000", lw=1)
return fig, ax, l1, l2, b1, b2, s1, s2, s3
def init():
l1.set_data(data.b1.x, data.t-data.t[0])
l2.set_data(data.b2.x, data.t-data.t[0])
b1.center = (data.b1.x[0], 0.)
ax.add_patch(b1)
b2.center = (data.b2.x[0], 0.)
ax.add_patch(b2)
x, y = getspring(0., data.b1.x[0]-bw, bw)
s1.set_data(x, y)
x, y = getspring(data.b1.x[0]+bw, data.b2.x[0]-bw, bw)
s2.set_data(x, y)
x, y = getspring(data.b2.x[0]+bw, L, bw)
s3.set_data(x, y)
plt.show()
return l1, l2, b1, b2, s1, s2, s3
def animate(i):
l1.set_data(data.b1.x, data.t-data.t[i])
l2.set_data(data.b2.x, data.t-data.t[i])
b1.center = (data.b1.x[i], 0.)
b2.center = (data.b2.x[i], 0.)
x, y = getspring(0., data.b1.x[i]-bw, bw)
s1.set_data(x, y)
x, y = getspring(data.b1.x[i]+bw, data.b2.x[i]-bw, bw)
s2.set_data(x, y)
x, y = getspring(data.b2.x[i]+bw, L, bw)
s3.set_data(x, y)
return l1, l2, b1, b2, s1, s2, s3
fig, ax, l1, l2, b1, b2, s1, s2, s3 = plot_animation(data, 0.5)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=len(data.t), interval=1.e3/fps, blit=True)
HTML(anim.to_html5_video())
```
As you can see, the oscillation is damped rather quickly, which is odd because we did not include any damping term in the differential equations for the velocities.
We can plot the relative error of the total energy in the system.
```
def plot_energy(data):
T = 0.5*(data.b1.m*data.Y[:, 2]**2 + data.b2.m*data.Y[:, 3]**2)
V = 0.5*(data.s1.k*data.Y[:, 0]**2 + data.s2.k*(data.Y[:, 1]-data.Y[:, 0])**2 + data.s3.k*data.Y[:, 1]**2)
E = T + V
dE = np.abs(E-E[0])/E[0]
fig, ax = plt.subplots(dpi=150)
ax.semilogy(data.t, dE)
ax.set_xlabel("Time")
ax.set_ylabel("Relative Energy Error")
ax.set_xlim(data.t[0], data.t[-1])
fig.tight_layout()
plt.show()
plot_energy(data)
```
The damping is purely numerical. It is caused by the implicit integration scheme used here, which is not well suited to this problem, similar to the explicit integrator used for the orbital integration.
The implicit midpoint method is symplectic, i.e., energy conserving. We can use it instead.
**Resetting**
```
sim.Y = (-3., 0., 0., 0)
sim.update()
sim.t = 0
sim.writer.reset()
sim.integrator.instructions = [Instruction(schemes.impl_2_midpoint_direct, sim.Y)]
sim.run()
data = sim.writer.read.all()
plot_oszillations(data)
fig, ax, l1, l2, b1, b2, s1, s2, s3 = plot_animation(data, 0.5)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=len(data.t), interval=1.e3/fps, blit=True)
HTML(anim.to_html5_video())
```
Now the damping is gone. The energy error plot now looks as follows.
```
plot_energy(data)
```
The energy error is of the order of machine precision.
But we could also easily use an explicit scheme. For explicit integration we do not have to set the differentiator, since we have set a Jacobian and `simframe` automatically calculates the derivative from it.
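Under the hood, the derivative needed by the explicit scheme is simply $\mathbf{J}\cdot\vec{Y}$. As a purely illustrative sketch (plain NumPy, not part of the `simframe` setup), a single classical 4th-order Runge-Kutta step with this right-hand side looks like this; the step size of $0.1$ matches the `dt` used above.
```
# Illustration only: one explicit RK4 step for dY/dt = J . Y,
# using the constant Jacobian `jac` defined earlier.
def dYdt(Y):
    return jac @ Y

def rk4_step(Y, h):
    k1_ = dYdt(Y)
    k2_ = dYdt(Y + 0.5*h*k1_)
    k3_ = dYdt(Y + 0.5*h*k2_)
    k4_ = dYdt(Y + h*k3_)
    return Y + h/6.*(k1_ + 2.*k2_ + 2.*k3_ + k4_)

print(rk4_step(np.array([-3., 0., 0., 0.]), 0.1))
```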
**Resetting**
```
sim.Y = (-3., 0., 0., 0)
sim.update()
sim.t = 0
sim.writer.reset()
sim.integrator.instructions = [Instruction(schemes.expl_4_runge_kutta, sim.Y)]
sim.run()
data = sim.writer.read.all()
plot_oszillations(data)
fig, ax, l1, l2, b1, b2, s1, s2, s3 = plot_animation(data, 0.5)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=len(data.t), interval=1.e3/fps, blit=True)
HTML(anim.to_html5_video())
plot_energy(data)
```
Here the energy error is larger than for the symplectic scheme, but still smaller than in the first attempt.
# Data description & Problem statement:
The SMS Spam Collection is a set of tagged SMS messages that have been collected for SMS spam research. It contains 5,574 SMS messages in English, tagged as ham (legitimate) or spam. Here I build a machine-learning model to help identify whether a given SMS is spam or not.
# Workflow:
- Load the dataset
- Data cleaning (e.g. remove formats and punctuations)
- Basic data exploration
- Text vectorization, using "Bag of Words" technique
- Use "Latent Dirichlet Allocation" for document clustering (i.e. topic modeling)
- Determine, sort and print most important words/features for each topic
```
import sklearn
import numpy as np
import scipy as sc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
%matplotlib inline
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
# we install and import the spacy package for some advanced tokenization techniques:
import spacy
# we also install and import mglearn package (using !pip install mglearn) for some interesting visualization of results:
import mglearn
ls
```
# load and prepare the text data:
```
messages = [line.rstrip() for line in open('SMSSpamCollection')]
#labels = [1 if m.split('\t')[0]=='ham' else 0 for m in messages]
#sms = [m.split('\t')[1] for m in messages]
messages = pd.read_csv('SMSSpamCollection', sep='\t', names=['labels', 'sms'])
messages.describe()
messages.groupby('labels').describe()
messages['length']= messages['sms'].map(lambda x: len(x))
# histogram of length of ham messages:
plt.hist(messages[messages['labels']=='ham']['length'], bins=150)
plt.xlabel('length of ham messages')
plt.ylabel('frequency')
plt.show()
# histogram of length of spam messages:
plt.hist(messages[messages['labels']=='spam']['length'], bins=80)
plt.xlabel('length of spam messages')
plt.ylabel('frequency')
plt.show()
messages['labels'] = messages['labels'].map({'ham':1, 'spam':0})
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer(max_features=500, stop_words="english",
ngram_range=(1, 1),
max_df=0.2)
X = vect.fit_transform(messages['sms'])
```
# document clustering with Latent Dirichlet Allocation: LDA
```
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(n_topics=3, learning_method="batch",
max_iter=32, random_state=42)
# We build the model and transform the data in one step
document_topics = lda.fit_transform(X)
# For each topic (a row in the components_), sort the features (ascending)
sorting = np.argsort(lda.components_, axis=1)[:, ::-1]
# Get the feature names from the vectorizer
feature_names = np.array(vect.get_feature_names())
# Print the top 10 words for each of the 3 topics:
mglearn.tools.print_topics(topics=range(3), feature_names=feature_names,
sorting=sorting, topics_per_chunk=5, n_words=10)
```
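As a follow-up, each message can be assigned to the topic with the highest weight in `document_topics`. The snippet below is a small sketch using the objects created above; the cross-tabulation against the ham/spam labels is only a rough consistency check, not a classifier.
```
# Assign each SMS to its dominant topic and count messages per topic.
dominant_topic = document_topics.argmax(axis=1)
print(np.bincount(dominant_topic))
# Cross-tabulate the dominant topic against the ham/spam label.
print(pd.crosstab(messages['labels'], dominant_topic))
```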
# Subbundles Part 2: Streamlines
**Subbundle** - a subgroup of streamlines with a set of common properties
Part 2: get `streamlines` and `affine`
```
from utils import get_tractogram_filename
import os.path as op
from dipy.io.streamline import load_tractogram
from AFQ import api
import AFQ.data as afd
```
## AFQ (from Part 1)
Instantiate AFQ object: `myafq` for desired dataset
```
myafq = api.AFQ(
bids_path=op.join(afd.afq_home, 'stanford_hardi'),
dmriprep='vistasoft'
)
```
## Bundles
0. Fornix (from QuickBundle example)
1. SLF
2. Corpus Callosum
3. Novel bundle
4. Whole brain
### Get Tractogram File
Name of the tractogram file, `tg_fname`, used for importing the initial streamlines. The streamlines may represent the whole brain or some subset (a bundle).
### <span style="color:red">NOTE: By default use pyAFQ tractogram, otherwise:</span>
- **Enable and run cells to use Fornix from dipy**
- useful if interested in baseline comparison to QuickBundle
#### 0. Fornix
<span style="color:blue">**TODO: Verify have Tissue properties for Fornix**</span>
#### 1. SLF - *superior longitudinal fasciculus* (reproduce)
- Grotheer, M., Zhen, Z., Lerma-Usabiaga, G., & Grill-Spector, K. (2019). Separate lanes for adding and reading in the white matter highways of the human brain. Nature communications, 10(1), 1-14.
https://www.nature.com/articles/s41467-019-11424-1
- Schurr, R., Zelman, A., & Mezer, A. A. (2020). Subdividing the superior longitudinal fasciculus using local quantitative MRI. NeuroImage, 208, 116439.
https://www.sciencedirect.com/science/article/pii/S1053811919310304
- De Schotten, M. T., Dell’Acqua, F., Forkel, S., Simmons, A., Vergani, F., Murphy, D. G., & Catani, M. (2011). A lateralized brain network for visuo-spatial attention. Nature Precedings, 1-1.
https://www.nature.com/articles/npre.2011.5549.1
https://www.researchgate.net/publication/281573090_A_lateralized_brain_network_for_spatial_attention
<span style="color:blue">**TODO: Run for Left and Right SLF bundles**</span>
### <span style="color:red">NOTE: By default use SLF_L bundle, otherwise:</span>
- **Change `bundle_name`**
- pyAFQ segmentation was run with default bundles. To determine valid names can either:
- refer to [documentation](https://yeatmanlab.github.io/pyAFQ/), or
- inspect the `myafq.bundle_dict` object
```
bundle_name='SLF_L'
tg_fname = get_tractogram_filename(myafq, bundle_name)
print(bundle_name,"tractogram file:", tg_fname)
```
#### 2. Corpus callosum tract profiles (baseline)
<span style="color:blue">**TODO: Corpus callosum tractometry**</span>
- define ROIs
- use a midsagittal inclusion ROI through the midline
- union of all callosum bundles
#### 3. Novel bundles (predictive)
Bundles where results are less established and more speculative
<span style="color:blue">**TODO: select existing bundles**</span>
- <span style="color:red">**Question: what bundles choose?**</span>
- Are there other bundles that would be ideal candidates? If so, why?
- What does literature say?
- Could greedily run on all bundles defined by RECO or waypoint ROIs
- <span style="color:red">**Question: are there any bundles that do not result in subbundles?**</span>
- Or does this approach always subdivide?
- For example, if we recursively use outputs as inputs, will there always be more subbundles?
#### 4. Whole Brain Tractometry
<span style="color:blue">**TODO: Run on whole brain tractometry**</span>
- See whether we reproduce the same top-level bundles
### Get Streamlines
```
tractogram = load_tractogram(tg_fname, 'same')# , bbox_valid_check=False)
streamlines = tractogram.streamlines
print("streamlines:", len(streamlines))
affine = tractogram.affine
print(affine)
```
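As a quick sanity check of the loaded data (not part of the subbundle pipeline), the streamline coordinates can be mapped into voxel space with the inverse of the affine, e.g. with dipy's `transform_streamlines` utility. This assumes the tractogram was loaded in world (RASmm) coordinates.
```
from dipy.tracking.streamline import transform_streamlines
import numpy as np

# Map streamline coordinates from world space into voxel space.
streamlines_vox = transform_streamlines(streamlines, np.linalg.inv(affine))
# Basic check: number of points in the first few streamlines.
print([len(sl) for sl in streamlines_vox[:5]])
```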
# Lesson 1: NumPy Part 1
This notebook is based on the official `NumPy` [documentation](https://docs.scipy.org/doc/numpy/user/quickstart.html). Unless otherwise credited, quoted text comes from this document. The Numpy documention describes NumPy in the following way:
> NumPy is the fundamental package for scientific computing with Python. It contains among other things:
> - a powerful N-dimensional array object
> - sophisticated (broadcasting) functions
> - tools for integrating C/C++ and Fortran code
> - useful linear algebra, Fourier transform, and random number capabilities
>
> Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases.
## Instructions
This tutorial provides step-by-step training divided into numbered sections. The sections often contain embedded executable code for demonstration. This tutorial is accompanied by a practice notebook: [L01-Numpy_Part1-Practice.ipynb](./L01-Numpy_Part1-Practice.ipynb).
Throughout this tutorial, sections labeled as "Tasks" are interspersed. You should follow the instructions provided in these sections by performing them in the practice notebook. When the tutorial is completed you can turn in the final practice notebook.
---
## 1. Getting Started
First, we must import the NumPy library. All packages are imported at the top of the notebook. Execute the code in the following cell to get started with this notebook (type Ctrl+Enter in the cell below)
```
# Import numpy
import numpy as np
```
The code above imports NumPy and makes it available through a variable named `np`. We can use this variable to access the functionality of NumPy. This is the form we will use for the rest of this class.
You may be wondering why we didn't import numpy like this:
```python
import numpy
```
We could, but the first form is far more commonly seen, and it allows us to use the `np` variable to access the functions and variables of the NumPy package. This makes the code more readable because it is not a mystery where the functions we are using come from.
### Task 1a: Setup
In the practice notebook, import the following packages:
+ `numpy` as `np`
## 2. The NumPy Array
What is an array? An array is a data structure that stores one or more objects of the same type (e.g. integers, strings, etc.) and can be multi-dimensional (e.g. 2D matrices). In Python, the list data type provides this type of functionality; however, it lacks important operations that would make it useful for scientific computing. Therefore, NumPy is a Python package that defines N-dimensional arrays and provides support for linear algebra and other functions useful to scientific computing.
From the Numpy QuickStart Tutorial:
> NumPy’s main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In NumPy dimensions are called axes.
_Note: a "tuple" is a list of numbers. For example, the pair of numbers surrounded by parentheses: (2,4), is a tuple containing two numbers.
NumPy arrays can be visualized in the following way:
<img src="http://community.datacamp.com.s3.amazonaws.com/community/production/ckeditor_assets/pictures/332/content_arrays-axes.png">
(image source: https://www.datacamp.com/community/tutorials/python-numpy-tutorial)
Using built-in Python lists, arrays are created in the following way:
```python
# A 1-dimensional list of numbers.
my_array = [1,2,3]
# A 2-dimensional list of numbers.
my_2d_array = [[1,2,3],[4,5,6]]
# A 3-dimensional list of numbers.
my_3d_array = [[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]]]
# Two lists of boolean values
a = [True, True, False, False]
b = [False, False, True, True]
```
Using NumPy, arrays are created using the `np.array()` function. For example, arrays with the same contents as above are created in the following way:
```python
# A 1-dimensional list of numbers.
my_array = np.array([1,2,3,4])
# A 2-dimensional list of numbers.
my_2d_array = np.array([[1,2,3,4], [5,6,7,8]])
# A 3-dimensional list of numbers.
my_3d_array = np.array([[[1,2,3,4], [5,6,7,8]], [[1,2,3,4], [9,10,11,12]]])
print(my_3d_array)
# Two lists of boolean values
a = np.array([True,True,False,False])
b = np.array([False,False,True,True])
```
In NumPy, these arrays are objects of type `ndarray`. You can learn more about the `ndarray` class on the [NumPy ndarray introduction page](https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html). However, this tutorial will walk you through some of the most important attributes, functions and uses of NumPy.
### Task 2a: Creating Arrays
In the practice notebook, perform the following.
- Create a 1-dimensional numpy array and print it.
- Create a 2-dimensional numpy array and print it.
- Create a 3-dimensional numpy array and print it.
## 3. Accessing Array Attributes
For this section we will retrieve information about the arrays. Once an array is created you can access information about the array such as the number of dimensions, its shape, its size, the data type that it stores, and the number of bytes it is consuming. There are a variety of attributes you can use such as:
+ `ndim`
+ `shape`
+ `size`
+ `dtype`
+ `itemsize`
+ `data`
+ `nbytes`
For example, to get the number of dimensions for an array:
```Python
# Print the number of dimensions for the array:
print(my_3d_array.ndim)
```
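A few more attributes of the same array can be printed in the same way:
```Python
# Print several attributes of the 3-dimensional array:
print(my_3d_array.shape)   # the size of each dimension
print(my_3d_array.size)    # the total number of elements
print(my_3d_array.dtype)   # the data type of the elements
print(my_3d_array.nbytes)  # the total number of bytes consumed
```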
You can learn more about these attributes, and others from the [NumPy ndarray reference page](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html) if you need help understanding the attributes.
Notice that we use dot notation to access these attributes, yet we do not provide the parentheses `()` like we would for a function call. This is because we are accessing attributes (i.e. member variables) of the NumPy array object; we are not calling a function.
### Task 3a: Accessing Array Attributes
In the practice notebook, perform the following.
- Create a NumPy array.
- Write code that prints these attributes (one per line): `ndim`, `shape`, `size`, `dtype`, `itemsize`, `data`, `nbytes`.
- Add a comment line, before each line describing what value the attribute returns.
## 4. Creating Initialized Arrays
Here we will learn to create initialized arrays. These arrays are pre-initialized with default values. NumPy provides a variety of easy-to-use functions for creating and initializing arrays. Some of these include:
+ [np.ones()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html#numpy.ones): Returns a new array of given shape and type, filled with ones.
+ [np.zeros()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html#numpy.zeros): Returns a new array of given shape and type, filled with zeros.
+ [np.empty()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.empty.html#numpy.empty): Return a new array of given shape and type, without initializing entries.
+ [np.full()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html#numpy.full): Returns a new array of given shape and type, filled with a given fill value.
+ [np.arange()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html#numpy.arange): Returns a new array of evenly spaced values within a given interval.
+ [np.linspace()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html#numpy.linspace): Returns a new array of evenly spaced numbers over a specified interval.
+ [np.random.random](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.random.random.html): Can be used to return a single random value or an array of random values between 0 and 1.
Take a moment, to learn more about the functions listed above by clicking on the function name as it links to the NumPy documentation. Pay attention to the arguments that each receives and the type of output (i.e array) it generates.
NumPy has a large list of array creation functions; you can learn more about these functions on the [array creation routines page](https://docs.scipy.org/doc/numpy/reference/routines.array-creation.html) of the NumPy documentation.
To demonstrate the use of these functions, the following code will create a two-dimensional array with 3 rows and 4 columns (i.e 3 *x* 4) filled with 0's.
```Python
zeros = np.zeros((3, 4))
```
The following creates a 1D array of values between 3 and 7
```Python
np.arange(3, 7)
```
The result is: `array([3, 4, 5, 6])`
The following creates a 1D array of values between 0 and 10 spaced every 2 integers:
```Python
np.arange(0, 10, 2)
```
The result is: `array([0, 2, 4, 6, 8])`
Notice that, just like with Python list slicing, the range includes values up to, but not including, the "stop" value of the range.
### Task 4a: Initializing Arrays
In the practice notebook, perform the following.
+ Create an initialized array by using these functions: `ones`, `zeros`, `empty`, `full`, `arange`, `linspace` and `random.random`. Be sure to follow each array creation with a call to `print()` to display your newly created arrays.
+ Add a comment above each function call describing what is being done.
## 5. Performing Math and Broadcasting
At times you may want to apply mathematical operations between arrays. For example, suppose you wanted to add, multiply or divide the contents of two arrays. If the two arrays are the same size this is straightforward. However, if the arrays are not the same size then it is more challenging. This is where broadcasting comes into play:
> The term broadcasting describes how numpy treats arrays with different shapes during arithmetic operations. Subject to certain constraints, the smaller array is “broadcast” across the larger array so that they have compatible shapes. (https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
### 5.1 Arrays of the same size
To demonstrate math with arrays of the same size, the following cell contains code that creates two arrays of the exact same size: _3 x 4_. Execute the cell to create those arrays:
```
# Define demo arrays:
demo_a = np.ones((3,4))
demo_b = np.random.random((3,4))
# Print the shapes of each array.
print(f"demo_a shape: {demo_a.shape}")
print(f"demo_b Shape: {demo_b.shape}")
```
Let's print the array to see what they contain:
```
print(demo_a)
print(demo_b)
```
Because these arrays are the same size we can perform basic math by using common arithmetic symbols. Execute the following cell to see the results of adding the two demo arrays:
```
# These arrays have the same shape,
demo_a + demo_b
```
The addition combined the values at corresponding positions in the two matrices, creating a new matrix. If you need clarification on how two matrices are added or subtracted, see the [Purple Math](https://www.purplemath.com/modules/mtrxadd.htm) site for examples.
### 5.2 Broadcasting for Arrays of Different Sizes
When arrays are not the same size, you cannot perform simple math. For this, NumPy provides a service known as "broadcasting". To broadcast, NumPy automatically resizes the arrays to match, and fills in newly created empty cells with values.
To broadcast, NumPy begins at the right-most dimensions of the arrays and compares them, then moves left and compares the next pair. As long as each pair meets the following criteria, broadcasting can be performed:
+ The dimensions are equal or
+ One of the dimensions is 1.
Consider two arrays of the following dimensions:
+ 4D array 1: 10 x 1 x 3 x 1
+ 3D array 2: 2 x 1 x 9
These arrays are not the same size, but they are compatible with broadcasting because at each dimension (from right to left) the criteria are met. When performing math, the values in each dimension of size 1 are broadcast to fill that dimension (an example is provided below). If the above arrays are added, the resulting array will be broadcast to a size of _10 x 2 x 3 x 9_.
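The following snippet confirms the resulting shape for the two example arrays described above:
```Python
# Broadcasting a 10 x 1 x 3 x 1 array with a 2 x 1 x 9 array:
array_1 = np.ones((10, 1, 3, 1))
array_2 = np.ones((2, 1, 9))
result = array_1 + array_2
print(result.shape)   # prints (10, 2, 3, 9)
```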
To demonstrate math with arrays of different size, the following cell contains code that creates two arrays: one of size _3 x 4_ and another 1-dimensional array with 4 elements. Execute the cell to create those arrays:
```
# Create the arrays.
demo_c = np.ones((3,4))
demo_d = np.arange(4)
# Print the array shapes.
print(f"demo_c shape: {demo_c.shape}")
print(f"demo_d Shape: {demo_d.shape}")
```
Let's print the array to see what they contain:
```
print(demo_c)
print(demo_d)
```
Because these arrays meet the broadcasting requirements, we can perform basic math by using common arithmetic symbols. Execute the following cell to see the results of adding the two demo arrays:
```
demo_c + demo_d
```
The addition resulted in the values in each dimension of size 1 being "broadcast" or "stretched" throughout that dimension and then used in the operation.
### 5.3 Broadcasting With Higher Dimensions
Consider the following arrays of 2 and 3 dimensions.
```
demo_e = np.ones((3, 4))
demo_f = np.random.random((5, 1, 4))
print(f"demo_e shape: {demo_e.shape}")
print(f"demo_f shape: {demo_f.shape}")
```
Print the arrays to see what they contain:
```
print(demo_e)
print(demo_f)
```
These two arrays meet the rules for broadcasting because they both have a 4 in their last dimension and there is a 1 in the second dimension of `demo_f`.
Perform the math by executing the following cell:
```
result = demo_e + demo_f
print(result)
```
The resulting array has dimensions of _5 x 3 x 4_. For this math to work, the values from `demo_f` had to be "stretched" (i.e. copied and then added) along its second dimension, and the values from `demo_e` were repeated along the new first dimension.
### Task 5a: Broadcasting Arrays
In the practice notebook, perform the following.
+ Create two arrays of differing sizes but compatible with broadcasting.
+ Perform addition, multiplication and subtraction.
+ Create two additional arrays of differing size that do not meet the rules for broadcasting and try a mathematical operation.
## 6. NumPy Aggregate Functions
NumPy also provides a variety of functions that "aggregate" data. Examples of aggregation include calculating the sum of every element in the array, calculating the mean, standard deviation, etc. Below are a few examples of aggregation functions provided by NumPy.
**Mathematics Functions**:
+ [np.sum()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html): sums the array elements over a given axis
+ [np.minimum()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.minimum.html#numpy.minimum): compares two arrays and returns a new array of the minimum at each position (i.e. element-wise)
+ [np.maximum()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.maximum.html#numpy.maximum): compares two arrays and returns a new array of the maximum at each position (i.e. element-wise).
+ [np.cumsum()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cumsum.html#numpy.cumsum): returns the cumulative sum of the elements along a given axis.
You can find more about mathematical functions for arrays at the [Numpy mathematical functions page](https://docs.scipy.org/doc/numpy/reference/routines.math.html).
**Statistics**:
+ [np.mean()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html): compute the arithmetic mean along the specified axis.
+ [np.median()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.median.html#numpy.median): compute the median along the specified axis.
+ [np.corrcoef()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.corrcoef.html#numpy.corrcoef): return Pearson product-moment correlation coefficients between two 1D arrays or one 2D array.
+ [np.std()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html#numpy.std): compute the standard deviation along the specified axis.
+ [np.var()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.var.html#numpy.var): compute the variance along the specified axis.
You can find more about statistical functions for arrays at the [Numpy statistical functions page](https://docs.scipy.org/doc/numpy/reference/routines.statistics.html).
Take a moment, to learn more about the functions listed above by clicking on the function name as it links to the NumPy documentation. Pay attention to the arguments that each receives and the type of output it generates.
For example:
```Python
# Calculate the sum of our demo data from above
np.sum(demo_e)
```
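And a few of the statistical functions, applied to the random `demo_b` array created earlier:
```Python
# Compute summary statistics for the 3 x 4 array of random values:
print(np.mean(demo_b))     # arithmetic mean of all elements
print(np.median(demo_b))   # median of all elements
print(np.std(demo_b))      # standard deviation of all elements
# Pearson correlation coefficients between the rows of demo_b (a 3 x 3 matrix):
print(np.corrcoef(demo_b))
```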
### Task 6a: Math/Stats Aggregate Functions
In the practice notebook, perform the following.
+ Create three to five arrays
+ Experiment with each of the aggregation functions: `sum`, `minimum`, `maximum`, `cumsum`, `mean`, `np.corrcoef`, `np.std`, `np.var`.
+ For each function call, add a comment line above it that describes what it does.
### 6.1 Logical Aggregate Functions
When arrays contain boolean values there are additional logical aggregation functions you can use:
+ [logical_and()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.logical_and.html#numpy.logical_and): computes the element-wise truth value of two arrays using AND.
+ [logical_or()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.logical_or.html#numpy.logical_or): computes the element-wise truth value of two arrays using OR.
+ [logical_not()](https://docs.scipy.org/doc/numpy/reference/generated/numpy.logical_not.html#numpy.logical_not): computes the element-wise truth value of two arrays using NOT.
You can find more about logical functions for arrays at the [Numpy Logic functions page](https://docs.scipy.org/doc/numpy/reference/routines.logic.html).
Take a moment to learn more about the functions listed above by clicking on each function name, as it links to the NumPy documentation. Pay attention to the arguments that each receives and the type of output it generates.
To demonstrate usage of the logical functions, please execute the following cells and examine the results produced.
```
# Two lists of boolean values
a = [True, True, False, False]
b = [False, False, True, True]
# Perform a logical "or":
np.logical_or(a, b)
# Perform a logical "and":
np.logical_or(a, b)
```
### Task 6b: Logical Aggregate Functions
<span style="float:right; margin-left:10px; clear:both;">
</span>
In the practice notebook, perform the following.
+ Create two arrays containing boolean values.
+ Experiment with each of the aggregation functions: `logical_and`, `logical_or`, `logical_not`.
+ For each function call, add a comment line above it that describes what it does.
|
github_jupyter
|
```
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Author(s): Kevin P. Murphy (murphyk@gmail.com) and Mahmoud Soliman (mjs@aucegypt.edu)
```
<a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/figures/chapter16_exemplar-based_methods_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Figure 16.1:<a name='16.1'></a> <a name='knn'></a>
(a) Illustration of a $K$-nearest neighbors classifier in 2d for $K=5$. The nearest neighbors of test point $\mathbf x $ have labels $\{1, 1, 1, 0, 0\}$, so we predict $p(y=1|\mathbf x , \mathcal D ) = 3/5$. (b) Illustration of the Voronoi tessellation induced by 1-NN. Adapted from Figure 4.13 of <a href='#Duda01'>[DHS01]</a>.
Figure(s) generated by [knn_voronoi_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/knn_voronoi_plot.py)
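As a rough sketch of the idea in the caption (not the book's script), a $K=5$ neighbor vote can be computed directly with NumPy; the points and labels below are made up.
```
import numpy as np

# Made-up 2d training points and binary labels (illustration only).
X = np.array([[0.0, 0.0], [0.1, 0.2], [0.2, 0.1],
              [1.0, 1.0], [1.1, 0.9], [0.9, 1.1]])
y = np.array([1, 1, 1, 0, 0, 0])
x_test = np.array([0.3, 0.3])

K = 5
dists = np.linalg.norm(X - x_test, axis=1)   # Euclidean distances to the test point
neighbors = np.argsort(dists)[:K]            # indices of the K nearest points
p_y1 = y[neighbors].mean()                   # fraction of neighbors with label 1
print(p_y1)                                  # 3/5, as in the caption
```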
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_and_run("/pyprobml/scripts/knn_voronoi_plot.py")
```
## Figure 16.2:<a name='16.2'></a> <a name='knnThreeClass'></a>
Decision boundaries induced by a KNN classifier. (a) $K=1$. (b) $K=2$. (c) $K=5$. (d) Train and test error vs $K$.
Figure(s) generated by [knn_classify_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/knn_classify_demo.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_and_run("/pyprobml/scripts/knn_classify_demo.py")
```
## Figure 16.3:<a name='16.3'></a> <a name='curse'></a>
Illustration of the curse of dimensionality. (a) We embed a small cube of side $s$ inside a larger unit cube. (b) We plot the edge length of a cube needed to cover a given volume of the unit cube as a function of the number of dimensions. Adapted from Figure 2.6 of <a href='#HastieBook'>[HTF09]</a>.
Figure(s) generated by [curse_dimensionality.py](https://github.com/probml/pyprobml/blob/master/scripts/curse_dimensionality.py)
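Panel (b) follows from a one-line formula: covering a fraction $f$ of the unit cube's volume in $D$ dimensions requires a sub-cube of edge length $e_D(f)=f^{1/D}$. A minimal sketch of that curve (not the referenced script):
```
import numpy as np
import matplotlib.pyplot as plt

D = np.arange(1, 11)                 # number of dimensions
for f in [0.01, 0.1]:                # fractions of the unit cube's volume to cover
    edge = f ** (1.0 / D)            # edge length e_D(f) = f^(1/D)
    plt.plot(D, edge, marker='o', label='fraction %.2f' % f)

plt.xlabel('Number of dimensions D')
plt.ylabel('Edge length of sub-cube')
plt.legend()
plt.show()
```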
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_and_run("/pyprobml/scripts/curse_dimensionality.py")
```
## Figure 16.4:<a name='16.4'></a> <a name='LCA'></a>
Illustration of latent coincidence analysis (LCA) as a directed graphical model. The inputs $\mathbf x , \mathbf x ' \in \mathbb R ^D$ are mapped into Gaussian latent variables $\mathbf z , \mathbf z ' \in \mathbb R ^L$ via a linear mapping $\mathbf W $. If the two latent points coincide (within length scale $\kappa $) then we set the similarity label to $y=1$, otherwise we set it to $y=0$. From Figure 1 of <a href='#Der2012'>[ML12]</a> . Used with kind permission of Lawrence Saul.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_image("/pyprobml/book1/figures/images/Figure_16.4.png")
```
## Figure 16.5:<a name='16.5'></a> <a name='tripletNet'></a>
Networks for deep metric learning. (a) Siamese network. (b) Triplet network. Adapted from Figure 5 of <a href='#Kaya2019'>[MH19]</a> .
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_image("/pyprobml/book1/figures/images/Figure_16.5_A.png")
pmlt.show_image("/pyprobml/book1/figures/images/Figure_16.5_B.png")
```
## Figure 16.6:<a name='16.6'></a> <a name='tripletBound'></a>
Speeding up triplet loss minimization. (a) Illustration of hard vs easy negatives. Here $a$ is the anchor point, $p$ is a positive point, and $n_i$ are negative points. Adapted from Figure 4 of <a href='#Kaya2019'>[MH19]</a> . (b) Standard triplet loss would take $8 \times 3 \times 4 = 96$ calculations, whereas using a proxy loss (with one proxy per class) takes $8 \times 2 = 16$ calculations. From Figure 1 of <a href='#Do2019cvpr'>[Tha+19]</a> . Used with kind permission of Gustavo Cerneiro.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_image("/pyprobml/book1/figures/images/Figure_16.6_A.png")
pmlt.show_image("/pyprobml/book1/figures/images/Figure_16.6_B.png")
```
## Figure 16.7:<a name='16.7'></a> <a name='SEC'></a>
Adding spherical embedding constraint to a deep metric learning method. Used with kind permission of Dingyi Zhang.
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_image("/pyprobml/book1/figures/images/Figure_16.7.png")
```
## Figure 16.8:<a name='16.8'></a> <a name='smoothingKernels'></a>
A comparison of some popular normalized kernels.
Figure(s) generated by [smoothingKernelPlot.py](https://github.com/probml/pyprobml/blob/master/scripts/smoothingKernelPlot.py)
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_and_run("/pyprobml/scripts/smoothingKernelPlot.py")
```
## Figure 16.9:<a name='16.9'></a> <a name='parzen'></a>
A nonparametric (Parzen) density estimator in 1d estimated from 6 data points, denoted by x. Top row: uniform kernel. Bottom row: Gaussian kernel. Left column: bandwidth parameter $h=1$. Right column: bandwidth parameter $h=2$. Adapted from http://en.wikipedia.org/wiki/Kernel_density_estimation .
Figure(s) generated by [Kernel_density_estimation](http://en.wikipedia.org/wiki/Kernel_density_estimation) [parzen_window_demo2.py](https://github.com/probml/pyprobml/blob/master/scripts/parzen_window_demo2.py)
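The construction in the caption can be reproduced in a few lines: place a kernel of bandwidth $h$ on each data point and average. A minimal sketch with a Gaussian kernel and made-up points (not the referenced script):
```
import numpy as np
import matplotlib.pyplot as plt

x_data = np.array([-2.1, -1.3, -0.4, 1.9, 5.1, 6.2])  # made-up data points
xs = np.linspace(-6, 10, 400)
h = 1.0                                                # bandwidth parameter

# Parzen estimate with a Gaussian kernel: average of N(x; x_i, h^2) over the data.
kde = np.mean(
    np.exp(-0.5 * ((xs[:, None] - x_data[None, :]) / h) ** 2) / (h * np.sqrt(2 * np.pi)),
    axis=1)

plt.plot(xs, kde)
plt.plot(x_data, np.zeros_like(x_data), 'x')
plt.xlabel('x'); plt.ylabel('estimated density')
plt.show()
```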
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_and_run("/pyprobml/scripts/parzen_window_demo2.py")
```
## Figure 16.10:<a name='16.10'></a> <a name='kernelRegression'></a>
An example of kernel regression in 1d using a Gaussian kernel.
Figure(s) generated by [kernelRegressionDemo.py](https://github.com/probml/pyprobml/blob/master/scripts/kernelRegressionDemo.py)
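For reference, Nadaraya-Watson kernel regression with a Gaussian kernel is just a locally weighted average of the training targets; a minimal sketch on synthetic data (not the referenced script):
```
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
x = np.sort(rng.uniform(-3, 3, 40))        # synthetic inputs
y = np.sin(x) + 0.2 * rng.randn(40)        # noisy targets
xs = np.linspace(-3, 3, 200)
h = 0.4                                    # kernel bandwidth

# Nadaraya-Watson estimate: Gaussian-weighted average of the targets.
W = np.exp(-0.5 * ((xs[:, None] - x[None, :]) / h) ** 2)
y_hat = (W * y).sum(axis=1) / W.sum(axis=1)

plt.plot(x, y, 'o', label='data')
plt.plot(xs, y_hat, label='kernel regression')
plt.legend()
plt.show()
```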
```
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/probml/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /pyprobml/scripts
pmlt.show_and_run("/pyprobml/scripts/kernelRegressionDemo.py")
```
## References:
<a name='Duda01'>[DHS01]</a> R. O. Duda, P. E. Hart and D. G. Stork. "Pattern Classification". (2001).
<a name='HastieBook'>[HTF09]</a> T. Hastie, R. Tibshirani and J. Friedman. "The Elements of Statistical Learning". (2009).
<a name='Kaya2019'>[MH19]</a> M. Kaya and H. S. Bilge. "Deep Metric Learning: A Survey". In: Symmetry (2019).
<a name='Der2012'>[ML12]</a> M. Der and L. K. Saul. "Latent Coincidence Analysis: A Hidden Variable Model for Distance Metric Learning". (2012).
<a name='Do2019cvpr'>[Tha+19]</a> T.-T. Do, T. Tran, I. Reid, V. Kumar, T. Hoang and G. Carneiro. "A Theoretically Sound Upper Bound on the Triplet Loss for Improving the Efficiency of Deep Distance Metric Learning". (2019).
|
github_jupyter
|
# Model of the return on a savings account
<img style="center" src="https://static.pexels.com/photos/9660/business-money-pink-coins.jpg" width="500px" height="200px" alt="atom"/>
> **Does money have the same value over time?** The answer is *no*. We have all experienced it.
> Two basic situations:
1. <font color=blue>Inflation</font>: How much money did you need to buy some chips and a soda 10 years ago? How much do you need today?
2. <font color=blue>Interest</font>: having $\$10000$ MXN available today is not the same as receiving $\$10000$ MXN in a year, because the money available today can be invested in a business or a bank account to generate **interest**. Therefore, the $\$10000$ MXN available today are worth more than the $\$10000$ MXN to be received in a year.
Reference:
- Vidaurri Aguirre, Héctor Manuel. *Ingeniería económica básica*, ISBN: 978-607-519-017-4. (Available in the library)
References:
- http://www.sympy.org
- http://matplotlib.org
- http://www.numpy.org
- http://ipywidgets.readthedocs.io/en/latest/index.html
___
## Interest
We will focus on how the value of money changes over time due to **interest**. There are two types:
### Capitalization with simple interest
This type of interest is computed <font color=red>solely and exclusively on the original amount invested</font>. As a consequence, the interest generated does not become part of the money that is invested; that is, <font color=blue>interest does not earn interest</font>.
Suppose we have an initial capital $C_0$ invested for a term of $k$ periods (months, quarters, semesters, years...) at a **simple interest** rate of $i$ per period. At the end of the first period, the capital $C_1$ obtained is:
$$C_1=C_0+iC_0=C_0(1+i).$$
Likewise, since the interest is computed only on the initial capital, at the end of the second period the capital $C_2$ obtained is:
$$C_2=C_1+iC_0=C_0+iC_0+iC_0=C_0(1+2i).$$
Thus, at the end of the $k$-th period, the capital $C_k$ obtained is:
$$C_k=C_{k-1}+iC_0=C_0+kiC_0=C_0(1+ki).$$
> **Example.** Suppose we have a capital of $\$10000$ MXN, which is placed in an investment fund that pays a simple interest rate of $0.8\%$ per month.
> If the savings goal is $\$11000$ MXN with no additional contributions, how many months should the money be left invested?
```
# Import the numpy library
import numpy as np
# Problem data
C_0, goal, i = 10000, 11000, 0.008
# Solve for k such that C_k = goal
k = np.ceil(((goal/C_0)-1)/i)
k = k.astype(int)
C_k = (1+k*i)*C_0
C_k = round(C_k,2)
# Print the answer
print("The number of periods required to reach the goal is ",k,
      ". The amount of money at the end of period ",k," is ",C_k,sep="")
```
> <font color=blue>**Activity.**</font>
1. What happens if the interest rate is not $0.8\%$ per month but $1\%$ per month?
2. What if the goal is not $\$11000$ MXN but $\$12000$ MXN?
```
# Solution 1
# Problem data
C_0, goal, i = 10000, 11000, 0.01
# Solve for k such that C_k = goal
k = np.ceil(((goal/C_0)-1)/i)
k = k.astype(int)
C_k = (1+k*i)*C_0
C_k = round(C_k,2)
# Print the answer
print("The number of periods required to reach the goal is ",k,
      ". The amount of money at the end of period ",k," is ",C_k,sep="")
# Solution 2
# Problem data
C_0, goal, i = 10000, 12000, 0.008
# Solve for k such that C_k = goal
k = np.ceil(((goal/C_0)-1)/i)
k = k.astype(int)
C_k = (1+k*i)*C_0
C_k = round(C_k,2)
# Print the answer
print("The number of periods required to reach the goal is ",k,
      ". The amount of money at the end of period ",k," is ",C_k,sep="")
```
> A plot illustrating the situation above can be produced as follows.
```
# Plotting library
import matplotlib.pyplot as plt
# Show the figures inline in the notebook
%matplotlib inline
# Jupyter widgets library
from ipywidgets import *
def interes_simple(C_0, meta, i):
    # Solve for k
    k = np.ceil((meta/C_0 - 1)/i) # Note the use of the ceil function
    k = k.astype(int) # Convert to integer
    C_k = C_0*(1+k*i) # Capital at the end of period k
    C_k = round(C_k, 2) # Round to two decimal places
    # Vector of periods
    kk = np.linspace(0,k,k+1)
    # Vector of capital per period
    CC = C_0*(1+kk*i)
    # Plot
    plt.figure(num=1); plt.clf() # Figure 1, clear its contents
    plt.plot(kk, CC,'*',ms=10) # Plot the evolution of the capital
    plt.plot(kk,meta*np.ones(k+1),'--k') # Plot the goal
    plt.xlabel('$k$',fontsize=18) # x-axis label
    plt.ylabel('$C_k$',fontsize=18) # y-axis label
    plt.grid(True) # Grid on the plot
    plt.show() # Show the figure
    print("The number of periods the money must stay invested to reach the goal of ", meta," is ", k,
          ". At the end of period ", k,", the capital is ", C_k, ".", sep="")
interact_manual(interes_simple, C_0=fixed(10000), meta=(10000,12000,100), i=fixed(0.008));
```
As expected, the capital at the $k$-th period, $C_k=C_0(1+ki)$, grows linearly with $k$.
### Capitalization with compound interest
The capital that generates simple interest stays constant for the whole duration of the investment. In contrast, the interest produced with compound interest in one period <font color=red>becomes capital in the following period</font>. That is, the interest generated at the end of a period <font color=blue>is reinvested in the next period so that it also produces interest</font>.
Suppose we have an initial capital $C_0$, and the use of this capital is ceded for a given period of time at an interest rate $i$. The capital obtained at the end of the first period, $C_1$, can be computed as
$$C_1=C_0(1+i).$$
If the above amount is ceded again at the same interest rate, at the end of the second period the capital $C_2$ is
$$C_2=C_1(1+i)=C_0(1+i)^2.$$
If the above process is repeated $k$ times, the capital at the end of the $k$-th period, $C_k$, is
$$C_k=C_{k-1}(1+i)=C_0(1+i)^k.$$
**Reference**:
- https://es.wikipedia.org/wiki/Inter%C3%A9s_compuesto.
> **Example.** Suppose we have a capital of $\$10000$ MXN, which is placed in an investment fund that pays an interest rate of $0.8\%$ per month.
> If the savings goal is $\$11000$ MXN with no additional contributions, how many months should the money be left invested?
```
# Solution
C_0, goal, i = 10000, 11000, 0.008
k = np.ceil(np.log(goal/C_0)/np.log(1+i))
k = k.astype(int)
C_k = ((1+i)**k)*C_0
C_k = round(C_k,2)
print("The number of periods required to reach the goal is ",k,
      ". The amount of money at the end of period ",k," is ",C_k,sep="")
```
> A plot illustrating the situation above can be produced as follows.
```
def interes_compuesto(C_0, meta, i):
    # Solve for k
    k = np.ceil(np.log(meta/C_0)/np.log(1+i))
    k = k.astype(int)
    C_k = C_0*(1+i)**k # Capital at the end of period k
    # Vector of periods
    kk = np.linspace(0,k,k+1)
    # Vector of capital per period
    CC = C_0*(1+i)**kk
    # Plot
    plt.figure(num=1); plt.clf() # Figure 1, clear its contents
    plt.plot(kk, CC,'*',linewidth=3.0) # Plot the evolution of the capital
    plt.plot(kk,meta*np.ones(k+1),'--k') # Plot the goal
    plt.xlabel('$k$',fontsize=18) # x-axis label
    plt.ylabel('$C_k$',fontsize=18) # y-axis label
    plt.grid(True) # Grid on the plot
    plt.show() # Show the figure
    print("The number of periods the money must stay invested to reach the goal of ", meta," is ", k,
          ". At the end of period ", k,", the capital is ", C_k, ".", sep="")
interact_manual(interes_compuesto, C_0=fixed(10000), meta=(10000,20000,100), i=fixed(0.008));
```
The capital at the $k$-th period, $C_k=C_0(1+i)^k$, grows exponentially with $k$.
> <font color=blue>**Activity.**</font>
> - Modify the code above so that the savings goal is kept fixed and the compound interest rate is varied.
```
interact_manual(interes_compuesto, C_0=fixed(10000), meta=fixed(11000), i=(0.01,0.05,0.01));
interact_manual(interes_compuesto, C_0=(10000,15000,1000), meta=fixed(18000), i=fixed(0.008));
```
### Continuous compounding of interest
Continuous compounding is considered a type of compound interest in which the interest is capitalized at every instant of time $t$. That is, the compounding frequency is infinite (or, equivalently, the compounding period tends to zero).
Suppose we have an initial capital $C_0$, and that the accumulated capital at time $t$ is $C(t)$. We want to know the capital after a time interval $\Delta t$, given that the effective interest rate for this interval is $i$. From the above we have
$$C(t+\Delta t)=C(t)(1+i)=C(t)(1+r\Delta t),$$
where $r=\frac{i}{\Delta t}$ is the instantaneous interest rate. Rearranging the expression above, we obtain
$$\frac{C(t+\Delta t)-C(t)}{\Delta t}=r\; C(t).$$
Letting $\Delta t\to 0$, we obtain the following differential equation
$$\frac{d C(t)}{dt}=r\; C(t),$$
subject to the initial condition (initial amount or capital) $C(0)=C_0$.
The above is a first-order linear differential equation, for which the *analytic solution* can be computed.
$$C(t) = C_0e^{rt}$$
The equivalence between the compound interest rate $i$ and the instantaneous interest rate $r$ is given by
$$e^r=1+i.$$
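A quick numerical sanity check of this equivalence (the values below are illustrative only):
```
# Verify numerically that discrete compounding at rate i matches
# continuous compounding at the instantaneous rate r = ln(1 + i).
import numpy as np

i = 0.008                      # monthly compound rate (illustrative)
r = np.log(1 + i)              # equivalent instantaneous rate
C_0, k = 10000, 12

print(C_0 * (1 + i) ** k)      # discrete compounding over k periods
print(C_0 * np.exp(r * k))     # continuous compounding over the same horizon: same value
```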
___
How can we compute the *numerical solution*?
> **Example.** Suppose we have a capital of $\$10000$ MXN, which is placed in an investment fund that pays an interest rate of $0.8\%$ per month.
> If the savings goal is $\$11000$ MXN with no additional contributions, how long should the money be left invested?
> Show a plot that illustrates the situation.
```
# Numerical integration library
from scipy.integrate import odeint
# Continuous compounding model
def cap_continuo(C,t,r):
    return r*C
def interes_continuo(C_0, meta, r):
    # Solve for t
    t = np.log(meta/C_0)/r
    # Vector of time points
    tt = np.linspace(0,t,100)
    # Vector of capital per time point
    CC = odeint(cap_continuo,C_0,tt,args=(r,))
    # Plot
    plt.figure(num=1); plt.clf() # Figure 1, clear its contents
    plt.plot(tt, CC,'-',linewidth=3.0) # Plot the evolution of the capital
    plt.plot(tt,meta*np.ones(len(tt)),'--k') # Plot the goal
    plt.xlabel('t') # x-axis label
    plt.ylabel('C(t)') # y-axis label
    plt.grid(True) # Grid on the plot
    plt.show() # Show the figure
    print("The time the money must stay invested to reach the goal of ", meta," is ", t, " months.", sep="")
interact_manual(interes_continuo, C_0=fixed(10000), meta=(11000,20000,100), r=fixed(np.log(1+i)));
```
___
## Deposit schedule
An important application of the concept of compound interest is building a model of a savings account with periodic deposits.
References:
- Vidaurri Aguirre, Héctor Manuel. *Ingeniería económica básica*, ISBN: 978-607-519-017-4. (Available in the library)
- http://pbpython.com/amortization-model.html
- https://pbpython.com/amortization-model-revised.html
```
# Data analysis library
import pandas as pd
# Library for handling dates
from datetime import date
# Savings account data
Tasa_interes = 0.005
Anos = 3
Abonos_ano = 12
Inicial = 1000
Meta = 80000
fecha_inicial = (date(2019,5,2))
# Equal monthly deposits needed to reach the savings goal in the desired time
Abono = -np.pmt(Tasa_interes/Abonos_ano,Anos*Abonos_ano,(Meta-Inicial))
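# Note: np.pmt was removed from NumPy 1.20+; on recent versions the equivalent call is
# numpy_financial.pmt(Tasa_interes/Abonos_ano, Anos*Abonos_ano, (Meta-Inicial))
# after installing the numpy-financial package.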
# Date range for the table
rng = pd.date_range(fecha_inicial,periods = Anos*Abonos_ano,freq='MS')
rng.name = "Fecha del abono"
# Columns for deposits and account balance on the corresponding dates
df = pd.DataFrame(index=rng,columns=['Abono','Balance'],dtype='float')
# Index by period number instead of by date
df.reset_index(inplace=True)
df.index+=1
df.index.name = "Período" # Column corresponding to the period
# All deposits are equal
df["Abono"] = Abono
# Index vector for the deposit periods
index_vector = np.arange(1,len(rng)+1)
# Month-by-month account balance
df["Balance"] = Inicial*(1+Tasa_interes/Abonos_ano)**index_vector+Abono*(((1+Tasa_interes/Abonos_ano)**index_vector-1)/(Tasa_interes/Abonos_ano))
# Round to two decimal places and display the DataFrame
df = df.round(2)
df
```
## Homework.
> - Look up real interest rates at a bank and project a monthly savings plan so that you have $\$45000$ MXN in your account by the time you finish your degree.
> - Do this in a new file, name it Tarea6_ApellidoNombre.ipynb and upload it to Moodle.
> - Deadline: Monday, May 6 at 13:00.
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Cristian Camilo Zapata Zuluaga.
</footer>
|
github_jupyter
|
```
!pip install matplotlib
!pip install pandas
!pip install numpy
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import os
```
# Volumes of Product
```
data_dir = "../sw21/datasets/"
# Variables per agent and time step
df1r1_steps = pd.read_csv(data_dir+"ex1_r1_agent_steps.csv" , delimiter=";", header=0)
df2r1_steps = pd.read_csv(data_dir+"ex2_r1_agent_steps.csv" , delimiter=";", header=0)
df1r2_steps = pd.read_csv(data_dir+"ex1_r2_agent_steps.csv" , delimiter=";", header=0)
df2r2_steps = pd.read_csv(data_dir+"ex2_r2_agent_steps.csv" , delimiter=";", header=0)
df1r3_steps = pd.read_csv(data_dir+"ex1_r3_agent_steps.csv" , delimiter=";", header=0)
df2r3_steps = pd.read_csv(data_dir+"ex2_r3_agent_steps.csv" , delimiter=";", header=0)
df1r4_steps = pd.read_csv(data_dir+"ex1_r4_agent_steps.csv" , delimiter=";", header=0)
df2r4_steps = pd.read_csv(data_dir+"ex2_r5_agent_steps.csv" , delimiter=";", header=0)
df1r5_steps = pd.read_csv(data_dir+"ex1_r5_agent_steps.csv" , delimiter=";", header=0)
df2r5_steps = pd.read_csv(data_dir+"ex2_r6_agent_steps.csv" , delimiter=";", header=0)
df1r6_steps = pd.read_csv(data_dir+"ex1_r6_agent_steps.csv" , delimiter=";", header=0)
df2r6_steps = pd.read_csv(data_dir+"ex2_r6_agent_steps.csv" , delimiter=";", header=0)
df1r7_steps = pd.read_csv(data_dir+"ex1_r7_agent_steps.csv" , delimiter=";", header=0)
df2r7_steps = pd.read_csv(data_dir+"ex2_r7_agent_steps.csv" , delimiter=";", header=0)
df1r8_steps = pd.read_csv(data_dir+"ex1_r8_agent_steps.csv" , delimiter=";", header=0)
df2r8_steps = pd.read_csv(data_dir+"ex2_r8_agent_steps.csv" , delimiter=";", header=0)
agents = ['Grower_1','Grower_2','Distributor','Retailer','Transporter_1 (LD)','Transporter_2 (LD)', 'Transporter (SD)']
titles = {"q_stock":"Stock","q_wasted":"Wasted","q_consumed":"Consumed"}
def plots(df,field,ex):
df_p = df[['step','id',field]]
fig, ax = plt.subplots(figsize=(8,6))
pls = []
for label, dfg in df_p.groupby('id'):
dfg.plot('step',field,ax=ax, label=agents[label-1], title=ex+": "+titles[field], marker = '.')
plt.ylabel("product units")
plt.xlabel("ticks")
plt.legend(handles=ax.get_legend_handles_labels()[0], title='Agents', bbox_to_anchor=(1.05, 1), loc='upper left')
```
### Wasted Details
```
df_steps = [df1r1_steps, df1r2_steps, df1r3_steps, df1r4_steps, df1r5_steps, df1r6_steps, df1r7_steps, df1r8_steps, df2r1_steps, df2r2_steps, df2r3_steps, df2r4_steps, df2r5_steps, df2r6_steps, df2r7_steps, df2r8_steps]
df_waste = [[0]*301 for p in range(16)]
df_cons = [[0]*301 for p in range(16)]
## define df_waste[simulation number][time period]
time_step = 0
i = 0
while i < 16 :
while time_step < 301:
df_waste[i][time_step] = df_steps[i][df_steps[i]["step"]== time_step].sum()["q_wasted"]
df_cons[i][time_step]= df_steps[i][df_steps[i]["step"]== time_step].sum()["q_consumed"]
time_step += 1
time_step = 0
i += 1
plt.plot(df_cons[0])
plt.plot(df_cons[1])
plt.plot(df_cons[2])
plt.plot(df_cons[3])
plt.plot(df_cons[4])
plt.plot(df_cons[5])
plt.plot(df_cons[6])
plt.plot(df_cons[7])
plt.title('Consumption Scenario 1')
plt.xlabel('Time Steps')
plt.ylabel('Volume of Product')
plt.show()
plt.plot(df_cons[8])
plt.plot(df_cons[9])
plt.plot(df_cons[10])
plt.plot(df_cons[11])
plt.plot(df_cons[12])
plt.plot(df_cons[13])
plt.plot(df_cons[14])
plt.plot(df_cons[15])
plt.title('Consumption Scenario 2')
plt.xlabel('Time Steps')
plt.ylabel('Volume of Product')
plt.show()
plt.plot(df_waste[0])
plt.plot(df_waste[1])
plt.plot(df_waste[2])
plt.plot(df_waste[3])
plt.plot(df_waste[4])
plt.plot(df_waste[5])
plt.plot(df_waste[6])
plt.plot(df_waste[7])
plt.title('Waste Scenario 1')
plt.xlabel('Time Steps')
plt.ylabel('Volume of Product')
plt.show()
plt.plot(df_waste[8])
plt.plot(df_waste[9])
plt.plot(df_waste[10])
plt.plot(df_waste[11])
plt.plot(df_waste[12])
plt.plot(df_waste[13])
plt.plot(df_waste[14])
plt.plot(df_waste[15])
plt.title('Waste Scenario 2')
plt.xlabel('Time Steps')
plt.ylabel('Volume of Product')
plt.show()
## Define df_waste_total[simulation]
df_waste_total = np.array([0.000]*16)
j = 0
df_steps[j].sum()["q_wasted"]
while j < 16:
w_tot = df_steps[j].sum()["q_wasted"]
c_tot = df_steps[j].sum()["q_consumed"]
final_stock = df_steps[j][df_steps[j]["step"]== 500].sum()["q_stock"]
df_waste_total[j] = (w_tot/(c_tot+w_tot+final_stock))*100
j += 1
# Out of date
ex1_waste_labels = ['Run 1', 'Run 2', 'Run 3', 'Run 4', 'Run 5', 'Run 6', 'Run 7', 'Run 8', ]
ex1_waste = [df1r1_w_per, df1r2_w_per, df1r3_w_per, df1r4_w_per, df1r5_w_per, df1r6_w_per, df1r7_w_per, df1r8_w_per]
x_pos = [i for i, _ in enumerate(ex1_waste_labels)]
plt.bar(x_pos, ex1_waste, color='green')
plt.xlabel("Simulation Runs")
plt.ylabel("Waste Percentage")
plt.title("Waste Percentate in Full Information Scenario")
plt.xticks(x_pos, ex1_waste_labels)
plt.show()
pd.DataFrame(ex1_waste, ex1_waste_labels)
# Out of date
ex2_waste_labels = ['Run 1', 'Run 2', 'Run 3', 'Run 4', 'Run 5', 'Run 6', 'Run 7', 'Run 8', ]
ex2_waste = [df2r1_w_per, df2r2_w_per, df2r3_w_per, df2r4_w_per, df2r5_w_per, df2r6_w_per, df2r7_w_per, df2r8_w_per]
x_pos = [i for i, _ in enumerate(ex2_waste_labels)]
print(x_pos)
plt.bar(x_pos, ex2_waste, color='blue')
plt.xlabel("Simulation Runs")
plt.ylabel("Waste Percentage")
plt.title("Waste Percentate in Zero Information Scenario")
plt.xticks(x_pos, ex2_waste_labels)
plt.show()
pd.DataFrame(ex2_waste, ex2_waste_labels)
# Out of date
data = [ex1_waste, ex2_waste]
labels = ['Full Information', 'No Information']
color_list = ['b', 'g', 'r']
gap = .8 / len(data)
for i, row in enumerate(data):
X = np.arange(len(row))
plt.bar(X + i * gap, row,
width = gap,
color = color_list[i % len(color_list)])
plt.xlabel("Simulation Runs")
plt.ylabel("Waste Percentage")
plt.title("Waste Percentate Comparison")
plt.legend(labels)
plt.show()
```
### Stock
```
plots(df1r1_steps,'q_stock','ex1r1')
plots(df1r2_steps,'q_stock','ex1r2')
plots(df2r1_steps,'q_stock','ex2r1')
plots(df2r2_steps,'q_stock','ex2r2')
```
### Consumed
```
plots(df1r1_steps,'q_consumed',"ex1")
plots(df2r1_steps,'q_consumed',"ex2")
plots(df1r2_steps,'q_consumed',"ex3")
plots(df2r2_steps,'q_consumed',"ex4")
```
### Wasted
```
plots(df1r1_steps,'q_wasted',"ex1")
plots(df2r1_steps,'q_wasted',"ex2")
plots(df1r2_steps,'q_wasted',"ex3")
plots(df2r2_steps,'q_wasted',"ex5")
```
# Product Decay
```
df_steps = [df1r1_steps, df1r2_steps, df1r3_steps, df1r4_steps, df1r5_steps, df1r6_steps, df1r7_steps, df1r8_steps, df2r1_steps, df2r2_steps, df2r3_steps, df2r4_steps, df2r5_steps, df2r6_steps, df2r7_steps, df2r8_steps]
df_waste = [[0]*301 for p in range(16)]
df_cons = [[0]*301 for p in range(16)]
## define df_waste[simulation number][time period]
time_step = 0
i = 0
while i < 16 :
while time_step < 301:
df_waste[i][time_step] = df_steps[i][df_steps[i]["step"]== time_step].sum()["q_wasted"]
df_cons[i][time_step]= df_steps[i][df_steps[i]["step"]== time_step].sum()["q_consumed"]
time_step += 1
time_step = 0
i += 1
plt.plot(df_waste[0])
```
|
github_jupyter
|
```
import csv
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
```
## Constants
```
LOGDIR = '../trace-data'
DATE_FORMAT_STR = '%Y-%m-%d %H:%M:%S'
MINUTES_PER_DAY = (24 * 60)
MICROSECONDS_PER_MINUTE = (60 * 1000)
```
## Utility code
```
def parse_date(date_str):
"""Parses a date string and returns a datetime object if possible.
Args:
date_str: A string representing a date.
Returns:
A datetime object if the input string could be successfully
parsed, None otherwise.
"""
if date_str is None or date_str == '' or date_str == 'None':
return None
return datetime.datetime.strptime(date_str, DATE_FORMAT_STR)
def timedelta_to_minutes(timedelta):
"""Converts a datetime timedelta object to minutes.
Args:
timedelta: The timedelta to convert.
Returns:
The number of minutes captured in the timedelta.
"""
minutes = 0.0
minutes += timedelta.days * MINUTES_PER_DAY
minutes += timedelta.seconds / 60.0
minutes += timedelta.microseconds / MICROSECONDS_PER_MINUTE
return minutes
def round_to_nearest_minute(t):
"""Rounds a datetime object down to the nearest minute.
Args:
t: A datetime object.
Returns:
A new rounded down datetime object.
"""
return t - datetime.timedelta(seconds=t.second, microseconds=t.microsecond)
def add_minute(t):
"""Adds a single minute to a datetime object.
Args:
t: A datetime object.
Returns:
A new datetime object with an additional minute.
"""
return t + datetime.timedelta(seconds=60)
def get_cdf(data):
"""Returns the CDF of the given data.
Args:
data: A list of numerical values.
Returns:
A pair of lists (x, y) for plotting the CDF.
"""
sorted_data = sorted(data)
p = 100. * np.arange(len(sorted_data)) / (len(sorted_data) - 1)
return sorted_data, p
class Job:
"""Encapsulates a job."""
def __init__(self, status, vc, jobid, attempts, submitted_time, user):
"""Records job parameters and computes key metrics.
Stores the passed in arguments as well as the number of GPUs
requested by the job. In addition, computes the queueing delay
as defined as the delta between the submission time and the start
time of the first attempt. Finally, computes run time as defined
as the delta between the initial attempt's start time and the last
attempt's finish time.
NOTE: Some jobs do not have any recorded attempts, and some attempts
have missing start and/or end times. A job's latest attempt having no
end time indicates that the job was still running when the log data
was collected.
Args:
status: One of 'Pass', 'Killed', 'Failed'.
vc: The hash of the virtual cluster id the job was run in.
jobid: The hash of the job id.
attempts: A list of dicts, where each dict contains the following keys:
'start_time': The start time of the attempt.
'end_time': The end time of the attempt.
'detail': A list of nested dicts where each dict contains
the following keys:
'ip': The server id.
'gpus': A list of the GPU ids allotted for this attempt.
submitted_time: The time the job was submitted to the queue.
user: The user's id.
"""
self._status = status
self._vc = vc
self._jobid = jobid
for attempt in attempts:
attempt['start_time'] = parse_date(attempt['start_time'])
attempt['end_time'] = parse_date(attempt['end_time'])
self._attempts = attempts
self._submitted_time = parse_date(submitted_time)
self._user = user
if len(self._attempts) == 0:
self._num_gpus = None
self._run_time = None
self._queueing_delay = None
else:
self._num_gpus = sum([len(detail['gpus']) for detail in self._attempts[0]['detail']])
if self._attempts[0]['start_time'] is None:
self._run_time = None
self._queueing_delay = None
else:
if self._attempts[-1]['end_time'] is None:
self._run_time = None
else:
self._run_time = \
timedelta_to_minutes(self._attempts[-1]['end_time'] -
self._attempts[0]['start_time'])
self._queueing_delay = \
timedelta_to_minutes(self._attempts[0]['start_time'] -
self._submitted_time)
@property
def status(self):
return self._status
@property
def vc(self):
return self._vc
@property
def jobid(self):
return self._jobid
@property
def attempts(self):
return self._attempts
@property
def submitted_time(self):
return self._submitted_time
@property
def user(self):
return self._user
@property
def num_gpus(self):
return self._num_gpus
@property
def queueing_delay(self):
return self._queueing_delay
@property
def run_time(self):
return self._run_time
def get_bucket_from_num_gpus(num_gpus):
"""Maps GPU count to a bucket for plotting purposes."""
if num_gpus is None:
return None
if num_gpus == 1:
return 0
elif num_gpus >= 2 and num_gpus <= 4:
return 1
elif num_gpus >= 5 and num_gpus <= 8:
return 2
elif num_gpus > 8:
return 3
else:
return None
def get_plot_config_from_bucket(bucket):
"""Returns plotting configuration information."""
if bucket == 0:
return ('1', 'green', '-')
elif bucket == 1:
return ('2-4', 'blue', '-.')
elif bucket == 2:
return ('5-8', 'red', '--')
elif bucket == 3:
return ('>8', 'purple', ':')
```
## Load the cluster log
```
cluster_job_log_path = os.path.join(LOGDIR, 'cluster_job_log')
with open(cluster_job_log_path, 'r') as f:
cluster_job_log = json.load(f)
jobs = [Job(**job) for job in cluster_job_log]
```
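As a quick, optional sanity check (not part of the original analysis), the parsed jobs can be tallied by completion status:
```
# Count jobs by status; the fields used here are the ones parsed by the Job class above.
from collections import Counter

status_counts = Counter(job.status for job in jobs)
print(status_counts)            # counts of 'Pass', 'Killed', 'Failed'
print('Total jobs:', len(jobs))
```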
# Job Runtimes (Figure 2)
```
run_times = {}
for job in jobs:
num_gpus = job.num_gpus
bucket = get_bucket_from_num_gpus(num_gpus)
if bucket is None:
continue
if bucket not in run_times:
run_times[bucket] = []
run_time = job.run_time
if run_time is not None:
run_times[bucket].append(run_time)
buckets = sorted([bucket for bucket in run_times])
for bucket in buckets:
num_gpus, color, linestyle = get_plot_config_from_bucket(bucket)
x, y = get_cdf(run_times[bucket])
plt.plot(x, y, label='%s GPU' % (num_gpus), color=color, linestyle=linestyle)
plt.legend(loc='lower right')
plt.xscale('log')
plt.xlim(10 ** -1, 10 ** 4)
plt.ylim(0, 100)
plt.xlabel('Time (min)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
plt.show()
```
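`get_cdf` (defined earlier in the notebook) drives this plot and the remaining CDF figures: it returns the sorted samples as x and their percentile ranks as y. A toy illustration with made-up run times:
```
# Toy data only: five hypothetical run times in minutes.
x, y = get_cdf([5.0, 1.0, 3.0, 2.0, 4.0])
# x == [1.0, 2.0, 3.0, 4.0, 5.0]
# y == [0., 25., 50., 75., 100.]
```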
# Queueing Delay (Figure 3)
```
queueing_delays = {}
for job in jobs:
vc = job.vc
if vc not in queueing_delays:
queueing_delays[vc] = {}
bucket = get_bucket_from_num_gpus(job.num_gpus)
if bucket is None:
continue
if bucket not in queueing_delays[vc]:
queueing_delays[vc][bucket] = []
# NOTE: Each period between the job being placed on the queue
# and being scheduled on a machine is recorded as an individual
# queueing delay.
queueing_delay = 0.0
queue_time = job.submitted_time
for attempt in job.attempts:
start_time = attempt['start_time']
if queue_time is not None and start_time is not None:
queueing_delay = timedelta_to_minutes(start_time - queue_time)
queue_time = attempt['end_time']
queueing_delays[vc][bucket].append(queueing_delay)
for vc in queueing_delays:
for bucket in queueing_delays[vc]:
queueing_delays[vc][bucket] = filter(None, queueing_delays[vc][bucket])
vcs = queueing_delays.keys()
for i, vc in enumerate(vcs):
for bucket in queueing_delays[vc]:
num_gpus, color, linestyle = get_plot_config_from_bucket(bucket)
x, y = get_cdf(queueing_delays[vc][bucket])
plt.plot(x, y, label='%s GPU' % (num_gpus), color=color, linestyle=linestyle)
plt.title('VC %s' % (vc))
plt.legend(loc='lower right')
plt.xscale('log')
plt.ylim(0, 100)
plt.xlim(10 ** -1, 10 ** 4)
plt.xlabel('Time (min)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
if i < len(vcs) - 1:
plt.figure()
plt.show()
```
# Locality Constraints (Figure 4)
```
data = {}
for i, job in enumerate(jobs):
if len(job.attempts) == 0:
continue
num_gpus = job.num_gpus
if num_gpus < 5:
continue
bucket = get_bucket_from_num_gpus(num_gpus)
if bucket not in data:
data[bucket] = {
'x': [],
'y': []
}
queueing_delay = job.queueing_delay
num_servers = len(job.attempts[0]['detail'])
data[bucket]['x'].append(queueing_delay)
data[bucket]['y'].append(num_servers)
for bucket in data:
num_gpus, _, _ = get_plot_config_from_bucket(bucket)
if bucket == 2:
marker = '+'
facecolors = 'black'
edgecolors = 'none'
else:
marker = 'o'
facecolors = 'none'
edgecolors = 'red'
plt.scatter(data[bucket]['x'], data[bucket]['y'], label='%s GPU' % (num_gpus),
marker=marker, facecolors=facecolors, edgecolors=edgecolors)
plt.legend()
plt.xscale('log')
plt.xlabel('Time (min)')
plt.ylabel('Num. Servers')
plt.show()
```
# GPU Utilization (Figures 5, 6)
```
gpu_util_path = os.path.join(LOGDIR, 'cluster_gpu_util')
gpu_util = {}
with open(gpu_util_path, 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
time = row[0][:-4] # Remove the timezone
machineId = row[1]
if machineId not in gpu_util:
gpu_util[machineId] = {}
gpu_util[machineId][time] = row[2:-1] # Ignore extra empty string at the end
def get_utilization_data(jobs, only_large_jobs=False, only_dedicated_servers=False):
"""Aggregates GPU utilization data for a set of jobs.
Args:
jobs: A list of Jobs.
only_large_jobs: If True, only considers jobs of size 8 or 16 GPUs.
Otherwise, considers jobs of size 1, 4, 8, or 16 GPUs.
only_dedicated_servers: If True, only considers jobs that use all GPUs
available on a server(s).
Returns:
      A dict indexed by 1) job completion status and 2) number of GPUs
      requested by the job. Each nested value is a flat list of utilization
      percentages, one per allocated GPU per minute, sampled on the servers
      used by the job while its attempts were running.
"""
data = {}
for job in jobs:
num_gpus = job.num_gpus
if (len(job.attempts) == 0 or
(num_gpus != 1 and num_gpus != 4 and num_gpus != 8 and num_gpus != 16)):
continue
if only_large_jobs and num_gpus < 8:
continue
status = job.status
if status not in data:
data[status] = {}
if num_gpus not in data[status]:
data[status][num_gpus] = []
for attempt in job.attempts:
if only_dedicated_servers and len(attempt['detail']) > (num_gpus / 8):
continue
current_time = attempt['start_time']
if current_time is None or attempt['end_time'] is None:
continue
current_minute = round_to_nearest_minute(current_time)
while current_minute < attempt['end_time']:
current_minute_str = str(current_minute)
for detail in attempt['detail']:
machineId = detail['ip']
if current_minute_str in gpu_util[machineId]:
for gpu_id in detail['gpus']:
gpu_num = int(gpu_id[3:]) # Remove the 'gpu' prefix
try:
u = gpu_util[machineId][current_minute_str][gpu_num]
if u != 'NA':
data[status][num_gpus].append(float(u))
except Exception as e:
print(gpu_util[machineId][current_minute_str])
print(gpu_num)
raise ValueError(e)
current_minute = add_minute(current_minute)
return data
data = get_utilization_data(jobs)
statuses = data.keys()
for i, status in enumerate(statuses):
all_num_gpus = sorted(data[status].keys())
for num_gpus in all_num_gpus:
if num_gpus == 1:
color = 'green'
linestyle = '-'
elif num_gpus == 4:
color = 'blue'
linestyle = '-.'
elif num_gpus == 8:
color = 'red'
linestyle = '--'
elif num_gpus == 16:
color = 'cyan'
linestyle = ':'
x, y = get_cdf(data[status][num_gpus])
plt.plot(x, y, label='%s GPU' % (num_gpus), color=color, linestyle=linestyle)
plt.title(status)
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.legend(loc='lower right')
plt.xlabel('Utilization (%)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
if i < len(statuses) - 1:
plt.figure()
plt.show()
data = get_utilization_data(jobs, only_large_jobs=True, only_dedicated_servers=True)
aggregate_data = {}
for status in data:
for num_gpus in data[status]:
if num_gpus not in aggregate_data:
aggregate_data[num_gpus] = []
aggregate_data[num_gpus] += data[status][num_gpus]
all_num_gpus = sorted(aggregate_data.keys())
for num_gpus in all_num_gpus:
if num_gpus == 8:
linestyle = '-'
elif num_gpus == 16:
linestyle = '-.'
x, y = get_cdf(aggregate_data[num_gpus])
plt.plot(x, y, label='%s GPU' % (num_gpus), color='black', linestyle=linestyle)
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.legend(loc='lower right')
plt.xlabel('Utilization (%)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
plt.show()
```
# Host Resource Utilization (Figure 7)
```
mem_util_path = os.path.join(LOGDIR, 'cluster_mem_util')
mem_util = []
with open(mem_util_path, 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
if row[2] == 'NA':
continue
mem_total = float(row[2])
mem_free = float(row[3])
if mem_total == 0:
continue
mem_util.append(100.0 * (mem_total - mem_free) / mem_total)
cpu_util_path = os.path.join(LOGDIR, 'cluster_cpu_util')
cpu_util = []
with open(cpu_util_path, 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
if row[2] == 'NA':
continue
cpu_util.append(float(row[2]))
x, y = get_cdf(cpu_util)
plt.plot(x, y, label='CPU', color='black', linestyle='-')
x, y = get_cdf(mem_util)
plt.plot(x, y, label='Memory', color='black', linestyle='-.')
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.legend(loc='lower right')
plt.xlabel('Utilization (%)')
plt.ylabel('CDF')
plt.show()
```
# RadarCOVID-Report
## Data Extraction
```
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
%matplotlib inline
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
```
### Constants
```
from Modules.ExposureNotification import exposure_notification_io
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
```
### Parameters
```
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
report_backend_identifier = environment_backend_identifier
else:
report_backend_identifier = default_backend_identifier
report_backend_identifier
environment_enable_multi_backend_download = \
os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
if environment_enable_multi_backend_download:
report_backend_identifiers = None
else:
report_backend_identifiers = [report_backend_identifier]
report_backend_identifiers
environment_invalid_shared_diagnoses_dates = \
os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
if environment_invalid_shared_diagnoses_dates:
invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",")
else:
invalid_shared_diagnoses_dates = []
invalid_shared_diagnoses_dates
```
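For a local run, these parameters can be overridden through environment variables set before the parameter cells above are executed; a minimal sketch (the values are examples, not recommendations):
```
# Example values only: select the German backend, enable multi-backend
# download, and mark two (hypothetical) dates as invalid for the
# shared-diagnoses estimation.
example_environment = {
    "RADARCOVID_REPORT__BACKEND_IDENTIFIER": "DE",
    "RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD": "1",
    "RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES": "2020-10-01,2020-10-02",
}
# os.environ.update(example_environment)  # uncomment to apply before the cells above
```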
### COVID-19 Cases
```
report_backend_client = \
exposure_notification_io.get_backend_client_with_identifier(
backend_identifier=report_backend_identifier)
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
confirmed_df_.iloc[0]
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
columns={
"date": "sample_date",
"iso_code": "country_code",
},
inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
try:
return pycountry.countries.get(alpha_3=x).alpha_2
except Exception as e:
logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
return None
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
confirmed_days = pd.date_range(
start=confirmed_df.iloc[0].sample_date,
end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
def sort_source_regions_for_display(source_regions: list) -> list:
if report_backend_identifier in source_regions:
source_regions = [report_backend_identifier] + \
list(sorted(set(source_regions).difference([report_backend_identifier])))
else:
source_regions = list(sorted(source_regions))
return source_regions
report_source_regions = report_backend_client.source_regions_for_date(
date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
source_regions=report_source_regions)
report_source_regions
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
source_regions_at_date_df = confirmed_days_df.copy()
source_regions_at_date_df["source_regions_at_date"] = \
source_regions_at_date_df.sample_date.apply(
lambda x: source_regions_for_date_function(date=x))
source_regions_at_date_df.sort_values("sample_date", inplace=True)
source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
source_regions_at_date_df.tail()
#%%
source_regions_for_summary_df_ = \
source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
source_regions_for_summary_df_.tail()
#%%
confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
for source_regions_group, source_regions_group_series in \
source_regions_at_date_df.groupby("_source_regions_group"):
source_regions_set = set(source_regions_group.split(","))
confirmed_source_regions_set_df = \
confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
confirmed_source_regions_group_df = \
confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
.reset_index().sort_values("sample_date")
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df.merge(
confirmed_days_df[["sample_date_string"]].rename(
columns={"sample_date_string": "sample_date"}),
how="right")
confirmed_source_regions_group_df["new_cases"] = \
confirmed_source_regions_group_df["new_cases"].clip(lower=0)
confirmed_source_regions_group_df["covid_cases"] = \
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[confirmed_output_columns]
confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[
confirmed_source_regions_group_df.sample_date.isin(
source_regions_group_series.sample_date_string)]
confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
result_df = confirmed_output_df.copy()
result_df.tail()
#%%
result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
result_df.sort_values("sample_date_string", inplace=True)
result_df.fillna(method="ffill", inplace=True)
result_df.tail()
#%%
result_df[["new_cases", "covid_cases"]].plot()
if columns_suffix:
result_df.rename(
columns={
"new_cases": "new_cases_" + columns_suffix,
"covid_cases": "covid_cases_" + columns_suffix},
inplace=True)
return result_df, source_regions_for_summary_df_
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
lambda date: [spain_region_country_code],
columns_suffix=spain_region_country_code.lower())
```
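Within `get_cases_dataframe`, the `covid_cases` column is a 7-day rolling mean of `new_cases`, with zeros treated as missing and forward-filled. A toy sketch of that smoothing (the case counts below are invented):
```
# Toy illustration of the smoothing used above; values are hypothetical.
toy_df = pd.DataFrame({"new_cases": [100, 120, 0, 90, 110, 130, 80, 95]})
toy_df["covid_cases"] = toy_df.new_cases.rolling(7, min_periods=0).mean().round()
toy_df = toy_df.replace(0, np.nan).fillna(method="ffill")
toy_df
```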
### Extract API TEKs
```
raw_zip_path_prefix = "Data/TEKs/Raw/"
base_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
exposure_notification_io.download_exposure_keys_from_backends(
backend_identifiers=report_backend_identifiers,
generation_days=backend_generation_days,
fail_on_error_backend_identifiers=base_backend_identifiers,
save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
columns={
"generation_datetime": "sample_datetime",
"generation_date_string": "sample_date_string",
},
inplace=True)
multi_backend_exposure_keys_df.head()
early_teks_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
"sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
active_regions = \
multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_backend_summary_df.rename(
columns={"key_data": "shared_teks_by_generation_date"},
inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
def compute_keys_cross_sharing(x):
teks_x = x.key_data_x.item()
common_teks = set(teks_x).intersection(x.key_data_y.item())
common_teks_fraction = len(common_teks) / len(teks_x)
return pd.Series(dict(
common_teks=common_teks,
common_teks_fraction=common_teks_fraction,
))
multi_backend_exposure_keys_by_region_df = \
multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_df.merge(
multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
columns=["_merge"], inplace=True)
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_combination_df[
multi_backend_exposure_keys_by_region_combination_df.region_x !=
multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
multi_backend_exposure_keys_by_region_combination_df \
.groupby(["region_x", "region_y"]) \
.apply(compute_keys_cross_sharing) \
.reset_index()
multi_backend_cross_sharing_summary_df = \
multi_backend_exposure_keys_cross_sharing_df.pivot_table(
values=["common_teks_fraction"],
columns="region_x",
index="region_y",
aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
multi_backend_without_active_region_exposure_keys_df = \
multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
exposure_keys_summary_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
```
### Dump API TEKs
```
tek_list_df = multi_backend_exposure_keys_df[
["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + "Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier]
tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
tek_list_current_path,
lines=True, orient="records")
tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json(
tek_list_daily_path,
lines=True, orient="records")
tek_list_base_df.to_json(
tek_list_hourly_path,
lines=True, orient="records")
tek_list_base_df.head()
```
### Load TEK Dumps
```
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
extracted_teks_df = pd.DataFrame(columns=["region"])
file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
if limit:
file_paths = file_paths[:limit]
for file_path in file_paths:
logging.info(f"Loading TEKs from '{file_path}'...")
iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
extracted_teks_df["region"] = \
extracted_teks_df.region.fillna(spain_region_country_code).copy()
if region:
extracted_teks_df = \
extracted_teks_df[extracted_teks_df.region == region]
return extracted_teks_df
daily_extracted_teks_df = load_extracted_teks(
mode="Daily",
region=report_backend_identifier,
limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
exposure_keys_summary_df_ = daily_extracted_teks_df \
.sort_values("extraction_date", ascending=False) \
.groupby("sample_date").tek_list.first() \
.to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
.rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
.sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
```
### Daily New TEKs
```
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
def compute_teks_by_generation_and_upload_date(date):
day_new_teks_set_df = tek_list_df.copy().diff()
try:
day_new_teks_set = day_new_teks_set_df[
day_new_teks_set_df.index == date].tek_list.item()
except ValueError:
day_new_teks_set = None
if pd.isna(day_new_teks_set):
day_new_teks_set = set()
day_new_teks_df = daily_extracted_teks_df[
daily_extracted_teks_df.extraction_date == date].copy()
day_new_teks_df["shared_teks"] = \
day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
day_new_teks_df["shared_teks"] = \
day_new_teks_df.shared_teks.apply(len)
day_new_teks_df["upload_date"] = date
day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
day_new_teks_df = day_new_teks_df[
["upload_date", "generation_date", "shared_teks"]]
day_new_teks_df["generation_to_upload_days"] = \
(pd.to_datetime(day_new_teks_df.upload_date) -
pd.to_datetime(day_new_teks_df.generation_date)).dt.days
day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
return day_new_teks_df
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
shared_teks_generation_to_upload_df = \
shared_teks_generation_to_upload_df.append(
compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
.sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
today_new_teks_df = \
shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
if not today_new_teks_df.empty:
today_new_teks_df.set_index("generation_to_upload_days") \
.sort_index().shared_teks.plot.bar()
generation_to_upload_period_pivot_df = \
shared_teks_generation_to_upload_df[
["upload_date", "generation_to_upload_days", "shared_teks"]] \
.pivot(index="upload_date", columns="generation_to_upload_days") \
.sort_index(ascending=False).fillna(0).astype(int) \
.droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "shared_teks_by_upload_date",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
[["upload_date", "shared_teks"]].rename(
columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_teks_uploaded_on_generation_date",
})
shared_teks_uploaded_on_generation_date_df.head()
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
.groupby(["upload_date"]).shared_teks.max().reset_index() \
.sort_values(["upload_date"], ascending=False) \
.rename(columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_diagnoses",
})
invalid_shared_diagnoses_dates_mask = \
estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
```
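The daily logic above hinges on set differences between consecutive extraction dates: `tek_list_df.diff()` subtracts each day's TEK set from the next one, so a TEK counts as newly shared on the first extraction date it appears in. A toy sketch with hypothetical TEK identifiers:
```
# Hypothetical TEK sets for two consecutive extraction dates.
teks_first_extraction = {"tek_a", "tek_b"}
teks_second_extraction = {"tek_a", "tek_b", "tek_c", "tek_d"}
# Equivalent to one row of tek_list_df.diff(): TEKs first seen on the second date.
newly_shared_teks = teks_second_extraction - teks_first_extraction
len(newly_shared_teks)  # 2
```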
### Hourly New TEKs
```
hourly_extracted_teks_df = load_extracted_teks(
mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
hourly_new_tek_count_df = hourly_extracted_teks_df \
.groupby("extraction_date_with_hour").tek_list. \
apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
.sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
"new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
"extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
```
### Official Statistics
```
import requests
import pandas.io.json
official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
official_stats_df = official_stats_df_.copy()
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
official_stats_column_map = {
"date": "sample_date",
"applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
"communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
accumulated_values_columns = \
list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
official_stats_df = \
official_stats_df[official_stats_column_map.keys()] \
.rename(columns=official_stats_column_map)
official_stats_df["extraction_date"] = extraction_date
official_stats_df.head()
official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json"
previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True)
previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True)
official_stats_df = official_stats_df.append(previous_official_stats_df)
official_stats_df.head()
official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)]
official_stats_df.sort_values("extraction_date", ascending=False, inplace=True)
official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True)
official_stats_df.head()
official_stats_stored_df = official_stats_df.copy()
official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d")
official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True)
official_stats_df.drop(columns=["extraction_date"], inplace=True)
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
official_stats_df[accumulated_values_columns] = \
official_stats_df[accumulated_values_columns] \
.astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
```
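The official figures arrive as accumulated totals, so daily values are recovered above by interpolating missing days and taking a reverse difference (the frame is sorted newest-first, hence `periods=-1`). A toy sketch with invented totals:
```
# Hypothetical accumulated totals, newest row first, one day missing.
toy_stats_df = pd.DataFrame({"shared_diagnoses_es_accumulated": [130.0, np.nan, 100.0]})
toy_stats_df = toy_stats_df.interpolate(limit_area="inside")  # 130.0, 115.0, 100.0
toy_stats_df["shared_diagnoses_es"] = \
    toy_stats_df.shared_diagnoses_es_accumulated.diff(periods=-1)  # 15.0, 15.0, NaN
toy_stats_df
```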
### Data Merge
```
result_summary_df = exposure_keys_summary_df.merge(
new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
with pd.option_context("mode.use_inf_as_na", True):
result_summary_df = result_summary_df.fillna(0).astype(int)
result_summary_df["teks_per_shared_diagnosis"] = \
(result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case"] = \
(result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
def compute_aggregated_results_summary(days) -> pd.DataFrame:
aggregated_result_summary_df = result_summary_df.copy()
aggregated_result_summary_df["covid_cases_for_ratio"] = \
aggregated_result_summary_df.covid_cases.mask(
aggregated_result_summary_df.shared_diagnoses == 0, 0)
aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
aggregated_result_summary_df.covid_cases_es.mask(
aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
aggregated_result_summary_df = aggregated_result_summary_df \
.sort_index(ascending=True).fillna(0).rolling(days).agg({
"covid_cases": "sum",
"covid_cases_es": "sum",
"covid_cases_for_ratio": "sum",
"covid_cases_for_ratio_es": "sum",
"shared_teks_by_generation_date": "sum",
"shared_teks_by_upload_date": "sum",
"shared_diagnoses": "sum",
"shared_diagnoses_es": "sum",
}).sort_index(ascending=False)
with pd.option_context("mode.use_inf_as_na", True):
aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
(aggregated_result_summary_df.shared_teks_by_upload_date /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
(aggregated_result_summary_df.shared_diagnoses /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(aggregated_result_summary_df.shared_diagnoses_es /
aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
return aggregated_result_summary_df
aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
```
## Report Results
```
display_column_name_mapping = {
"sample_date": "Sample\u00A0Date\u00A0(UTC)",
"source_regions": "Source Countries",
"datetime_utc": "Timestamp (UTC)",
"upload_date": "Upload Date (UTC)",
"generation_to_upload_days": "Generation to Upload Period in Days",
"region": "Backend",
"region_x": "Backend\u00A0(A)",
"region_y": "Backend\u00A0(B)",
"common_teks": "Common TEKs Shared Between Backends",
"common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
"covid_cases": "COVID-19 Cases (Source Countries)",
"shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
"shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
"shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
"shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
"teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
"shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
"covid_cases_es": "COVID-19 Cases (Spain)",
"app_downloads_es": "App Downloads (Spain – Official)",
"shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
"shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
summary_columns = [
"covid_cases",
"shared_teks_by_generation_date",
"shared_teks_by_upload_date",
"shared_teks_uploaded_on_generation_date",
"shared_diagnoses",
"teks_per_shared_diagnosis",
"shared_diagnoses_per_covid_case",
"covid_cases_es",
"app_downloads_es",
"shared_diagnoses_es",
"shared_diagnoses_per_covid_case_es",
]
summary_percentage_columns = [
"shared_diagnoses_per_covid_case_es",
"shared_diagnoses_per_covid_case",
]
```
### Daily Summary Table
```
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
```
### Daily Summary Plots
```
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
.droplevel(level=["source_regions"]) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
title=f"Daily Summary",
rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
for percentage_column in summary_percentage_columns:
percentage_column_index = summary_columns.index(percentage_column)
summary_ax_list[percentage_column_index].yaxis \
.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
```
### Daily Generation to Upload Period Table
```
display_generation_to_upload_period_pivot_df = \
generation_to_upload_period_pivot_df \
.head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
.head(backend_generation_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping)
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
"Shared TEKs Generation to Upload Period Table")
sns.heatmap(
data=display_generation_to_upload_period_pivot_df
.rename_axis(columns=display_column_name_mapping)
.rename_axis(index=display_column_name_mapping),
fmt=".0f",
annot=True,
ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
```
### Hourly Summary Plots
```
hourly_summary_ax_list = hourly_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
```
### Publish Results
```
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
display_formatters = {
display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
general_columns = \
list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
.head(daily_plot_days) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
.head(daily_plot_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
if pd.isna(x):
return "-"
elif round(x * 100, 1) == 0:
return ""
else:
return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
display_brief_source_regions = report_source_regions[0]
else:
display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
import dataframe_image as dfi
df = df.copy()
df_styler = df.style.format(display_formatters)
media_path = get_temporary_image_path()
dfi.export(df_styler, media_path)
return media_path
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
```
### Save Results
```
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
```
### Publish Results as JSON
```
def generate_summary_api_results(df: pd.DataFrame) -> list:
api_df = df.reset_index().copy()
api_df["sample_date_string"] = \
api_df["sample_date"].dt.strftime("%Y-%m-%d")
api_df["source_regions"] = \
api_df["source_regions"].apply(lambda x: x.split(","))
return api_df.to_dict(orient="records")
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
last_14_days=last_14_days_summary,
daily_results=summary_api_results)
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
```
### Publish on README
```
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
```
### Publish on Twitter
```
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
(shared_teks_by_upload_date_last_hour or not are_today_results_partial):
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
def format_shared_diagnoses_per_covid_case(value) -> str:
if value == 0:
return "–"
return f"≤{value:.2%}"
display_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case)
display_last_14_days_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"])
display_last_14_days_shared_diagnoses_per_covid_case_es = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"])
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f}
- Usage Ratio: {display_shared_diagnoses_per_covid_case}
Last 14 Days:
- Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case}
- Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es}
Info: {github_project_base_url}#documentation
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
```
<font style="font-size:96px; font-weight:bolder; color:#0040a0"><img src="http://montage.ipac.caltech.edu/docs/M51_logo.png" alt="M" style="float: left; padding: 25px 30px 25px 0px;" /></font>
<i><b>Montage</b> is an astronomical image toolkit with components for reprojection, background matching, coaddition and visualization of FITS files. It can be used as a set of command-line tools (Linux, OS X and Windows), C library calls (Linux and OS X) and as Python binary extension modules.
The Montage source is written in ANSI-C and can be downloaded from GitHub ( https://github.com/Caltech-IPAC/Montage ). The Python package can be installed from PyPI ("</i>pip install MontagePy<i>"). The package has no external dependencies. See http://montage.ipac.caltech.edu/ for details on the design and applications of Montage.</i>
# MontagePy.main modules: mArchiveList
The Montage modules are generally used as steps in a workflow to create a mosaic of a set of input images. These steps are: determine the geometry of the mosaic on the sky, reproject the images to a common frame and spatial sampling; rectify the backgrounds to a common level, and coadd the images into a mosaic. This page illustrates the use of one Montage module, mArchiveList, which is used to search for images from a given archive in a region of the sky.
Visit <a href="Mosaic.ipynb">Building a Mosaic with Montage</a> to see how mArchiveList is used as part of a workflow to create a mosaic (or the <a href="Mosaic_oneshot.ipynb"> one shot </a> version if you just want to see the commands). See the complete list of Montage Notebooks <a href="http://montage.ipac.caltech.edu/MontageNotebooks">here</a>.
```
from MontagePy.main import mArchiveList
help(mArchiveList)
```
## mArchiveList Example
mArchiveList can be used to retrieve a list of images overlapping a region on the sky for the following missions: 2MASS, SDSS, WISE and DSS. These were chosen because they are all-sky surveys or at least cover very large contiguous areas of the sky. The list contains some metadata describing the image coverage, a URL to retrieve each image, and a suggested local file name to use on download.
The following call retrieves a list of 2MASS J-band images around M17; the target mosaic region is 0.5 degrees, but we request a 2-degree search area. <b>NOTE:</b> Not all archive metadata searches are equally good about matching the sky coverage you intend. We find it best to ask for metadata for a larger area than you need, then narrow the list down to the region you want using its header and mCoverageCheck.
```
rtn = mArchiveList('2MASS', 'J', 'M17', 2., 2., 'work/M17/archive.tbl')
print(rtn)
```
Montage modules return JSON structures. They always include a status (0: success; 1: error) and a variable number of informational parameters. Here it shows the count for the returned list of images.
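For example, a minimal way to branch on that structure (a sketch only; `rtn` is assumed to behave like a dict with at least a 'status' key, and the status value is compared as a string to stay agnostic about its type):
```
# Minimal sketch: branch on the status flag of the structure returned above.
if str(rtn['status']) == '0':
    print('Found', rtn.get('count', 'an unknown number of'), 'matching images')
else:
    print('mArchiveList failed:', rtn.get('msg', rtn))
```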
## Archive List Data
The returned table is in IPAC ASCII format, the default for Montage. You can read it into a Pandas dataframe with AstroPy. Here we extract the URL used to download each data file and the local file name.
```
import os
import numpy as np
import pandas as pd
from astropy.io import ascii
ipactable = ascii.read('work/M17/archive.tbl').to_pandas()
ipactable
```
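The exact column names can differ between Montage releases, so the sketch below first prints the available columns and only selects the download URL and suggested local file name if the commonly used names (`URL` and `fname`, assumptions here) are present:
```
# Hedged sketch: locate the download URL and suggested local file name columns.
# 'URL' and 'fname' are assumed names; check the printed list if they differ.
print(list(ipactable.columns))
if {'URL', 'fname'}.issubset(ipactable.columns):
    print(ipactable[['URL', 'fname']].head(3))
```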
## mArchiveList Error Handling
If mArchiveList encounters an error, the return structure will just have two elements: a status of 1 ("error") and a message string that tries to diagnose the reason for the error.
For instance, if the user asks for an unsupported dataset:
```
rtn = mArchiveList('OtherMission', 'A', 'M51', 0.5, 0.5, 'remote.tbl')
print(rtn)
```
<p> </p>
## Classic Montage: mArchiveList as a Stand-Alone Program
### Unix/Windows Command-line Arguments
mArchiveList can also be run as a command-line tool in Linux, OS X, and Windows:
<p><tt>
<b>Usage:</b> mArchiveList [-d] survey band object|location width height outfile (object/location must be a single argument string)
</tt></p>
<p> </p>
## mArchiveList as a Library Call
If you are writing in C/C++ on Linux or OSX, mArchiveList can be accessed as a library function:
<pre>
/*-***********************************************************************/
/* */
/* mArchiveList -- Given a location on the sky, archive name, and size */
/* in degrees contact the IRSA server to retreive a list of archive */
/* images. The list contains enough information to support mArchiveGet */
/* downloads. */
/* */
/* char *survey Data survey to search (e.g. 2MASS, SDSS, */
/* WISE, etc.) */
/* */
/* char *band Wavelength band (e.g. J for 2MASS, g for */
/* SDSS) */
/* */
/* char *locstr A (quoted if necessary) string containing */
/* a coordinate or the name of an object on */
/* the sky */
/* */
/* double width Image width in degrees */
/* double height Image height in degrees */
/* */
/* char *outfile Output FITS header file */
/* */
/* int debug Debugging output flag */
/* */
/*************************************************************************/
struct mArchiveListReturn *mArchiveList(char *survey, char *band, char *location,
double width, double height,
char *outfile, int debug)
</pre>
<p><b>Return Structure</b></p>
<pre>
struct mArchiveListReturn
{
int status; // Return status (0: OK, 1:ERROR)
char msg [1024]; // Return message (for error return)
char json[4096]; // Return parameters as JSON string
int count; // Number of lines in output file.
};
</pre>
# Classifying Fashion-MNIST
Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial for neural networks: you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.
<img src='assets/fashion-mnist-sprite.png' width=500px>
In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.
First off, let's load the dataset through torchvision.
```
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])  # Fashion-MNIST images are single-channel
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here we can see one of the images.
```
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
```
## Building the network
Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.
```
from torch import nn, optim
import torch.nn.functional as F
# TODO: Define your network architecture here
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
# make sure input tensor is flattened
x = x.view(x.shape[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.log_softmax(self.fc4(x), dim=1)
return x
```
# Train the network
Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) (something like `nn.CrossEntropyLoss` or `nn.NLLLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).
Then write the training code. Remember the training pass is a fairly straightforward process:
* Make a forward pass through the network to get the logits
* Use the logits to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights
By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
```
# TODO: Create the network, define the criterion and optimizer
model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
# TODO: Train the network here
epochs = 5
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
log_ps = model(images)
loss = criterion(log_ps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
else:
print(f"Training loss: {running_loss/len(trainloader)}")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
# Test out your network!
dataiter = iter(testloader)
images, labels = next(dataiter)
img = images[1]
# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))
# Plot the image and probabilities
helper.view_classify(img, ps, version='Fashion')
```
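If you also want a quick quantitative check, here is a hedged sketch (reusing `model` and `testloader` from the cells above) that computes accuracy over the whole test set:
```
# Optional sketch: overall accuracy on the Fashion-MNIST test set.
correct, total = 0, 0
with torch.no_grad():
    for images, labels in testloader:
        ps = torch.exp(model(images))          # class probabilities
        top_p, top_class = ps.topk(1, dim=1)   # most likely class per image
        correct += (top_class == labels.view(*top_class.shape)).sum().item()
        total += labels.shape[0]
print(f"Test accuracy: {correct / total:.1%}")
```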
```
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
# Install TorchAudio
!pip install torchaudio>=0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
```
# **SPEAKER RECOGNITION**
Speaker Recognition (SR) is a broad research area that solves two major tasks: speaker identification (who is speaking?) and speaker verification (is the speaker who they claim to be?). In this work, we focus on text-independent speaker recognition, where the identity of the speaker is based on how the speech is spoken, not necessarily on what is being said. Typically, such SR systems operate on unconstrained speech utterances, which are converted into fixed-length vectors called speaker embeddings. Speaker embeddings are also used in automatic speech recognition (ASR) and speech synthesis.
In this tutorial, we shall first train these embeddings on speaker-related datasets, and then get speaker embeddings from a pretrained network for a new dataset. Since Google Colab has very slow read-write speeds, I'll be demonstrating this tutorial using [an4](http://www.speech.cs.cmu.edu/databases/an4/).
If you'd like to try a bigger dataset like [hi-mia](https://arxiv.org/abs/1912.01231) instead, use the [get_hi-mia_data.py](https://github.com/NVIDIA/NeMo/blob/stable/scripts/dataset_processing/get_hi-mia_data.py) script to download the necessary files, extract them, and re-sample to 16 kHz any samples that are not already at 16 kHz.
```
import os
NEMO_ROOT = os.getcwd()
print(NEMO_ROOT)
import glob
import subprocess
import tarfile
import wget
data_dir = os.path.join(NEMO_ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
# Download the dataset. This will take a few moments...
print("******")
if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):
an4_url = 'http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz'
an4_path = wget.download(an4_url, data_dir)
print(f"Dataset downloaded at: {an4_path}")
else:
print("Tarfile already exists.")
an4_path = data_dir + '/an4_sphere.tar.gz'
# Untar and convert .sph to .wav (using sox)
tar = tarfile.open(an4_path)
tar.extractall(path=data_dir)
print("Converting .sph to .wav...")
sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)
for sph_path in sph_list:
wav_path = sph_path[:-4] + '.wav'
cmd = ["sox", sph_path, wav_path]
subprocess.run(cmd)
print("Finished conversion.\n******")
```
Since an4 was not designed for speaker recognition, it gives us a chance to demonstrate how to generate the manifest files needed for training. The same steps can be applied to any dataset to produce similar training manifests.
First, get an scp file listing all the wav files with absolute paths for each of the train, dev, and test sets. This can easily be done with the `find` bash command:
```
!find {data_dir}/an4/wav/an4_clstk -iname "*.wav" > {data_dir}/an4/wav/an4_clstk/train_all.scp
```
Let's look at the first 3 lines of scp file for train.
```
!head -n 3 {data_dir}/an4/wav/an4_clstk/train_all.scp
```
Now that we have the scp file for training, we use `scp_to_manifest.py` to convert it to a manifest file, and then optionally split the files into train & dev sets (for evaluating the model during training) by passing the `--split` flag. The `--split` option is not needed for the test folder.
You also need to provide the `--id` argument: the index of the `/`-separated path field to be used as the speaker label.
After running the script, your `data` folder should contain directories with manifest files such as:
* `data/<path>/train.json`
* `data/<path>/dev.json`
* `data/<path>/train_all.json`
Each line in the manifest file describes a training sample - `audio_filepath` contains the path to the wav file, `duration` is its duration in seconds, and `label` is the speaker class label:
`{"audio_filepath": "<absolute path to dataset>data/an4/wav/an4test_clstk/menk/cen4-menk-b.wav", "duration": 3.9, "label": "menk"}`
```
if not os.path.exists('scripts'):
print("Downloading necessary scripts")
!mkdir -p scripts/speaker_tasks
!wget -P scripts/speaker_tasks/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/speaker_tasks/scp_to_manifest.py
!python {NEMO_ROOT}/scripts/speaker_tasks/scp_to_manifest.py --scp {data_dir}/an4/wav/an4_clstk/train_all.scp --id -2 --out {data_dir}/an4/wav/an4_clstk/all_manifest.json --split
```
Generate the scp for the test folder and then convert it to a manifest.
```
!find {data_dir}/an4/wav/an4test_clstk -iname "*.wav" > {data_dir}/an4/wav/an4test_clstk/test_all.scp
!python {NEMO_ROOT}/scripts/speaker_tasks/scp_to_manifest.py --scp {data_dir}/an4/wav/an4test_clstk/test_all.scp --id -2 --out {data_dir}/an4/wav/an4test_clstk/test.json
```
## Path to manifest files
```
train_manifest = os.path.join(data_dir,'an4/wav/an4_clstk/train.json')
validation_manifest = os.path.join(data_dir,'an4/wav/an4_clstk/dev.json')
test_manifest = os.path.join(data_dir,'an4/wav/an4_clstk/dev.json')
```
As the goal of most speaker-related systems is to obtain good speaker-level embeddings that help distinguish one speaker from another, we first train these embeddings in an end-to-end
manner, optimizing a [QuartzNet](https://arxiv.org/abs/1910.10261)-based encoder model with a cross-entropy loss.
We modify the decoder to produce fixed-size embeddings irrespective of the length of the input audio, using a mean- and variance-based statistics pooling method to obtain them.
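As a rough illustration (a sketch only, not NeMo's actual implementation; the function name `stats_pool` is made up here), statistics pooling turns a variable-length sequence of frame-level features into a fixed-size vector by concatenating the per-feature mean and standard deviation over time:
```
import torch

def stats_pool(frame_feats):
    # frame_feats: [batch, feat_dim, time] frame-level encoder outputs
    mean = frame_feats.mean(dim=-1)          # [batch, feat_dim]
    std = frame_feats.std(dim=-1)            # [batch, feat_dim]
    return torch.cat([mean, std], dim=-1)    # [batch, 2 * feat_dim], independent of time length

# Example: a 237-frame utterance and a 512-dim encoder both map to a 1024-dim vector
pooled = stats_pool(torch.randn(4, 512, 237))
print(pooled.shape)  # torch.Size([4, 1024])
```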
# Training
Import necessary packages
```
import nemo
# NeMo's ASR collection - this collections contains complete ASR models and
# building blocks (modules) for ASR
import nemo.collections.asr as nemo_asr
from omegaconf import OmegaConf
```
## Model Configuration
The SpeakerNet Model is defined in a config file which declares multiple important sections.
They are:
1) model: All arguments that will relate to the Model - preprocessors, encoder, decoder, optimizer and schedulers, datasets, and any other related information
2) trainer: Any argument to be passed to PyTorch Lightning
```
# This line will print the entire config of sample SpeakerNet model
!mkdir conf
!wget -P conf https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/speaker_tasks/recognition/conf/SpeakerNet_recognition_3x2x512.yaml
MODEL_CONFIG = os.path.join(NEMO_ROOT,'conf/SpeakerNet_recognition_3x2x512.yaml')
config = OmegaConf.load(MODEL_CONFIG)
print(OmegaConf.to_yaml(config))
```
## Setting up the datasets within the config
You will notice that there are a few config dictionaries called train_ds, validation_ds and test_ds. These are the configurations used to set up the Dataset and DataLoaders for the corresponding data splits.
```
print(OmegaConf.to_yaml(config.model.train_ds))
print(OmegaConf.to_yaml(config.model.validation_ds))
```
You will often notice that some configs have ??? in place of paths. This is a placeholder so that the user can supply the value later.
Let's add the manifest paths to the config above.
Also, since the an4 dataset doesn't have a test set with the same speakers used in training, we will use the validation manifest as the test manifest for demonstration purposes.
```
config.model.train_ds.manifest_filepath = train_manifest
config.model.validation_ds.manifest_filepath = validation_manifest
config.model.test_ds.manifest_filepath = validation_manifest
```
Also, as we are training on the an4 dataset, there are 74 speaker labels in training, and we need to set this in the decoder config.
```
config.model.decoder.num_classes = 74
```
## Building the PyTorch Lightning Trainer
NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem!
Let us first instantiate a Trainer object!
```
import torch
import pytorch_lightning as pl
print("Trainer config - \n")
print(OmegaConf.to_yaml(config.trainer))
# Let us modify some trainer configs for this demo
# Checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
config.trainer.gpus = cuda
# Reduces maximum number of epochs to 5 for quick demonstration
config.trainer.max_epochs = 5
# Remove distributed training flags
config.trainer.accelerator = None
trainer = pl.Trainer(**config.trainer)
```
## Setting up a NeMo Experiment
NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it!
```
from nemo.utils.exp_manager import exp_manager
log_dir = exp_manager(trainer, config.get("exp_manager", None))
# The log_dir provides a path to the current logging directory for easy access
print(log_dir)
```
## Building the SpeakerNet Model
SpeakerNet is an ASR model with a classification task - it generates one label for the entire provided audio stream. Therefore we encapsulate it inside the EncDecSpeakerLabelModel as follows.
```
speaker_model = nemo_asr.models.EncDecSpeakerLabelModel(cfg=config.model, trainer=trainer)
```
Before we begin training, let us first create a TensorBoard visualization to monitor progress.
```
try:
from google import colab
COLAB_ENV = True
except (ImportError, ModuleNotFoundError):
COLAB_ENV = False
# Load the TensorBoard notebook extension
if COLAB_ENV:
%load_ext tensorboard
  %tensorboard --logdir {log_dir}
else:
print("To use tensorboard, please use this notebook in a Google Colab environment.")
```
As any NeMo model is inherently a PyTorch Lightning module, it can be trained in a single line: trainer.fit(model)!
Below, we see that the model begins to get modest scores on the validation set after just 5 epochs of training.
```
trainer.fit(speaker_model)
```
This config was not designed or tuned for an4, so you may observe an unstable val_loss.
If you have a test manifest file, we can easily compute test accuracy by running
<pre><code>trainer.test(speaker_model, ckpt_path=None)
</code></pre>
## For Faster Training
We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.
For multi-GPU training, take a look at the [PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html)
For mixed-precision training, take a look at the [PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html#mixed-precision-16-bit-training)
### Mixed precision:
<pre><code>trainer = Trainer(amp_level='O1', precision=16)
</code></pre>
### Trainer with a distributed backend:
<pre><code>trainer = Trainer(gpus=2, num_nodes=2, accelerator='ddp')
</code></pre>
Of course, you can combine these flags as well.
## Saving/Restoring a checkpoint
There are multiple ways to save and load models in NeMo. Since all NeMo models are inherently Lightning Modules, we can use the standard way that PyTorch Lightning saves and restores models.
NeMo also provides a more advanced model save/restore format, which encapsulates all the parts of the model that are required to restore that model for immediate use.
In this example, we will explore both ways of saving and restoring models, but we will focus on the PyTorch Lightning method.
## Saving and Restoring via PyTorch Lightning Checkpoints
When using NeMo for training, it is advisable to utilize the exp_manager framework. It is tasked with handling checkpointing and logging (Tensorboard as well as WandB optionally!), as well as dealing with multi-node and multi-GPU logging.
Since we utilized the exp_manager framework above, we have access to the directory where the checkpoints exist.
exp_manager with the default settings will save multiple checkpoints for us:
1) A few checkpoints from certain steps of training, tagged with --val_loss=.
2) A checkpoint at the last epoch of training, denoted by --last.
3) If the model finishes training, it will also have a --last checkpoint.
```
# Let us list all the checkpoints we have
checkpoint_dir = os.path.join(log_dir, 'checkpoints')
checkpoint_paths = list(glob.glob(os.path.join(checkpoint_dir, "*.ckpt")))
checkpoint_paths
final_checkpoint = list(filter(lambda x: "-last.ckpt" in x, checkpoint_paths))[0]
print(final_checkpoint)
```
## Restoring from a PyTorch Lightning checkpoint
To restore a model, use the LightningModule.load_from_checkpoint() class method.
```
restored_model = nemo_asr.models.EncDecSpeakerLabelModel.load_from_checkpoint(final_checkpoint)
```
# Finetuning
Since we don't have a new manifest file to fine-tune on, we will demonstrate fine-tuning using the test manifest file created earlier.
The an4 test set has a different set of speakers from the train set (10 in total). As we didn't split this set for validation, we will reuse it for validation as well.
So, to fine-tune, all we need to do is update the model config with these manifest paths and change the number of decoder classes, which creates a new decoder with the updated number of classes.
```
test_manifest = os.path.join(data_dir,'an4/wav/an4test_clstk/test.json')
config.model.train_ds.manifest_filepath = test_manifest
config.model.validation_ds.manifest_filepath = test_manifest
config.model.decoder.num_classes = 10
```
Once the necessary model config parameters are set up, all we need to do is call the setup_finetune_model method.
```
restored_model.setup_finetune_model(config.model)
```
Now that we have set up the data and replaced the decoder for fine-tuning, we just need to create a trainer and start training with a smaller learning rate for fewer epochs.
```
# Setup the new trainer object
# Let us modify some trainer configs for this demo
# Checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
trainer_config = OmegaConf.create(dict(
gpus=cuda,
max_epochs=5,
max_steps=None, # computed at runtime if not set
num_nodes=1,
accumulate_grad_batches=1,
checkpoint_callback=False, # Provided by exp_manager
logger=False, # Provided by exp_manager
log_every_n_steps=1, # Interval of logging.
val_check_interval=1.0, # Set to 0.25 to check 4 times per epoch, or an int for number of iterations
))
print(OmegaConf.to_yaml(trainer_config))
trainer_finetune = pl.Trainer(**trainer_config)
```
## Setting the trainer to the restored model
We attach the new trainer to the restored model and set up a fresh experiment directory for the fine-tuning run.
```
restored_model.set_trainer(trainer_finetune)
log_dir_finetune = exp_manager(trainer_finetune, config.get("exp_manager", None))
print(log_dir_finetune)
```
## Setup optimizer + scheduler
For a fine-tuning experiment, let us set up the optimizer and scheduler!
We will use a much lower learning rate than before
```
import copy
optim_sched_cfg = copy.deepcopy(restored_model._cfg.optim)
# Struct mode prevents us from popping off elements from the config, so let us disable it
OmegaConf.set_struct(optim_sched_cfg, False)
# Let us change the maximum learning rate to previous minimum learning rate
optim_sched_cfg.lr = 0.001
# Set "min_lr" to lower value
optim_sched_cfg.sched.min_lr = 1e-4
print(OmegaConf.to_yaml(optim_sched_cfg))
# Now let us update the optimizer settings
restored_model.setup_optimization(optim_sched_cfg)
# We can also just directly replace the config inplace if we choose to
restored_model._cfg.optim = optim_sched_cfg
```
## Fine-tune training step
We fine-tune on the subset recognition problem. Note, the model was originally trained on these classes (the subset defined here has already been trained on above).
When fine-tuning on a truly new dataset, we will not see such a dramatic improvement in performance. However, it should still converge a little faster than if it was trained from scratch.
```
## Fine-tuning for 5 epochs
trainer_finetune.fit(restored_model)
```
# Saving .nemo file
Now we can save the whole config and model parameters in a single .nemo file, and restore from it at any time.
```
restored_model.save_to(os.path.join(log_dir_finetune, '..',"SpeakerNet.nemo"))
!ls {log_dir_finetune}/..
# restore from a saved model
restored_model_2 = nemo_asr.models.EncDecSpeakerLabelModel.restore_from(os.path.join(log_dir_finetune, '..', "SpeakerNet.nemo"))
```
# Speaker Verification
Training a speaker verification model is almost the same as training a speaker recognition model; the main difference is the loss function. Angular loss is better suited to verification because the model is trained end to end with a loss that pushes the embedding clusters of different speakers apart by maximizing the angle between the clusters.
To train for verification, we just need to enable the `angular` flag: `config.model.decoder.params.angular = True`.
Once this is set, the loss changes to angular loss and we can follow the steps above to train the model.
Note that the scale and margin values for the loss function are set at `config.model.loss.scale` and `config.model.loss.margin`.
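For example, using the config paths mentioned above (the scale and margin values below are only illustrative, not values prescribed by this tutorial):
```
# Enable angular loss in the decoder for verification training
config.model.decoder.params.angular = True

# Scale and margin hyperparameters of the angular loss (illustrative values)
config.model.loss.scale = 30.0
config.model.loss.margin = 0.2
```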
## Extract Speaker Embeddings
Once you have a trained model (or one of our pretrained NeMo checkpoints), you can extract speaker embeddings for any speaker.
To demonstrate this, we use `nemo_asr.models.ExtractSpeakerEmbeddingsModel` with, say, 5 audio samples from our dev manifest. This model is intended specifically for inference, extracting embeddings from a trained `.nemo` model.
```
verification_model = nemo_asr.models.ExtractSpeakerEmbeddingsModel.restore_from(os.path.join(log_dir_finetune, '..', 'SpeakerNet.nemo'))
```
Now, we need to pass the necessary manifest_filepath and params to set up the data loader for extracting embeddings
```
!head -5 {validation_manifest} > embeddings_manifest.json
config.model.train_ds
from nemo.collections.asr.parts.utils.speaker_utils import embedding_normalize
from tqdm import tqdm
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
import numpy as np
import json
import pickle as pkl
def get_embeddings(speaker_model, manifest_file, batch_size=1, embedding_dir='./', device='cuda'):
test_config = OmegaConf.create(
dict(
manifest_filepath=manifest_file,
sample_rate=16000,
labels=None,
batch_size=batch_size,
shuffle=False,
time_length=20,
)
)
speaker_model.setup_test_data(test_config)
speaker_model = speaker_model.to(device)
speaker_model.eval()
all_embs=[]
out_embeddings = {}
for test_batch in tqdm(speaker_model.test_dataloader()):
test_batch = [x.to(device) for x in test_batch]
audio_signal, audio_signal_len, labels, slices = test_batch
with autocast():
_, embs = speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
emb_shape = embs.shape[-1]
embs = embs.view(-1, emb_shape)
all_embs.extend(embs.cpu().detach().numpy())
del test_batch
all_embs = np.asarray(all_embs)
all_embs = embedding_normalize(all_embs)
with open(manifest_file, 'r') as manifest:
for i, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
uniq_name = '@'.join(dic['audio_filepath'].split('/')[-3:])
out_embeddings[uniq_name] = all_embs[i]
embedding_dir = os.path.join(embedding_dir, 'embeddings')
if not os.path.exists(embedding_dir):
os.makedirs(embedding_dir, exist_ok=True)
prefix = manifest_file.split('/')[-1].rsplit('.', 1)[-2]
name = os.path.join(embedding_dir, prefix)
embeddings_file = name + '_embeddings.pkl'
pkl.dump(out_embeddings, open(embeddings_file, 'wb'))
print("Saved embedding files to {}".format(embedding_dir))
manifest_filepath = os.path.join(NEMO_ROOT,'embeddings_manifest.json')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
get_embeddings(verification_model, manifest_filepath, batch_size=64,embedding_dir='./', device=device)
```
Embeddings are stored as a dictionary in `embedding_dir`: each key is a uniq_name generated from the sample's audio_filepath in the manifest file, and each value is the corresponding embedding.
```
ls ./embeddings/
```
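As a quick sanity check, the pickle can be loaded and inspected; the file name below simply follows the naming convention used in `get_embeddings` above:
```
import pickle as pkl

# The file name follows <manifest prefix>_embeddings.pkl inside ./embeddings/
with open('./embeddings/embeddings_manifest_embeddings.pkl', 'rb') as f:
    out_embeddings = pkl.load(f)

# Keys are uniq_names, values are the normalized embedding vectors
for uniq_name, emb in list(out_embeddings.items())[:2]:
    print(uniq_name, emb.shape)
```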
# **Convolution for Classification of Horses and Humans**
## **Abstract**
The aim of this notebook is to classify images of horses and humans using a convolutional neural network built with TensorFlow.
```
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \
-O /tmp/horse-or-human.zip
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip \
-O /tmp/validation-horse-or-human.zip
```
The following Python code uses the os module to access the file system, and the zipfile module to unzip the data.
```
import os
import zipfile
# Extract both archives; the context managers ensure each archive is closed
local_zip = '/tmp/horse-or-human.zip'
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/tmp/horse-or-human')
local_zip = '/tmp/validation-horse-or-human.zip'
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/tmp/validation-horse-or-human')
```
The contents of the .zip files are extracted to the base directories `/tmp/horse-or-human` and `/tmp/validation-horse-or-human`, each of which contains `horses` and `humans` subdirectories.
The training set is the data used to tell the neural network model 'this is what a horse looks like', 'this is what a human looks like', and so on.
There is no need to explicitly label the images as horses or humans: the image generator reads images from the subdirectories and automatically labels them from the name of each subdirectory.
```
# Directory with our training horse pictures
train_horse_dir = os.path.join('/tmp/horse-or-human/horses')
# Directory with our training human pictures
train_human_dir = os.path.join('/tmp/horse-or-human/humans')
# Directory with our validation horse pictures
validation_horse_dir = os.path.join('/tmp/validation-horse-or-human/validation-horses')
# Directory with our validation human pictures
validation_human_dir = os.path.join('/tmp/validation-horse-or-human/validation-humans')
import tensorflow as tf
```
We now define the convolutional layers and finish with densely connected layers.
Note that because we are facing a two-class classification problem, i.e. a *binary classification* problem, we end our network with a [*sigmoid* activation](https://wikipedia.org/wiki/Sigmoid_function), so that the output of our network is a single scalar between 0 and 1, encoding the probability that the current image is class 1 (as opposed to class 0).
```
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 150x150 with 3 bytes color
# This is the first convolution
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# The fifth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')
tf.keras.layers.Dense(1, activation='sigmoid')
])
```
The model.summary() method call prints a summary of the neural network.
```
model.summary()
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics=['acc'])
```
### **Data Preprocessing**
Let's set up data generators that will read pictures from our source folders, convert them to `float32` tensors, and feed them (with their labels) to our network. We'll have one generator for the training images and one for the validation images. The generators will yield batches of 150x150 images and their binary labels.
```
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1/255)
validation_datagen = ImageDataGenerator(rescale=1/255)
# Flow training images in batches of 128 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
'/tmp/horse-or-human/', # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=128,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 32 using validation_datagen generator
validation_generator = validation_datagen.flow_from_directory(
        '/tmp/validation-horse-or-human/',  # This is the source directory for validation images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=32,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
```
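To confirm which class is encoded as 0 and which as 1, you can inspect the mapping that `flow_from_directory` inferred from the subdirectory names:
```
# Keras assigns class indices alphabetically from the subdirectory names
print(train_generator.class_indices)   # expected: {'horses': 0, 'humans': 1}
```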
### **Training**
```
history = model.fit_generator(
train_generator,
steps_per_epoch=8,
epochs=15,
verbose=1,
validation_data = validation_generator,
validation_steps=8)
```
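Here `steps_per_epoch=8` with a batch size of 128 covers roughly the whole training set once per epoch. If you change the batch size, one way to keep full coverage (a small sketch using the generator's own counters) is:
```
import math

# One epoch = enough batches to see every image once
steps_per_epoch = math.ceil(train_generator.samples / train_generator.batch_size)
validation_steps = math.ceil(validation_generator.samples / validation_generator.batch_size)
print(steps_per_epoch, validation_steps)
```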
### **Running the Model**
```
import numpy as np
from google.colab import files
from keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = '/content/' + fn
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(classes[0])
if classes[0]>0.5:
print(fn + " is a human")
else:
print(fn + " is a horse")
```
### **Conclusion**
The trained model was able to correctly classify new images that it had not seen during training.
### **References**
1. https://www.coursera.org/learn/introduction-tensorflow/home/welcome
2. https://www.tensorflow.org/
Copyright 2020 Abhishek Gargha Maheshwarappa
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Exploring Network Architectures
In this section you will:
* Explore the impact of different neural network architectures on performance including:
* Accuracy,
* Training time,
* And loss
* Build an intuition for how layers can build up or destroy useful information
## What is Network Architecture?
In short, a neural network's architecture is defined by the number and shape of its layers. In state-of-the-art neural networks these architectures can get quite complex, with some connections skipping layers, or some layers passing data backwards to previous layers. This lab is focused on relatively simple architectures for fully connected neural networks.
Once again, we'll be working with the MNIST dataset.
```
# Imports and formatting the data
# See previous lab if this is confusing
from matplotlib import pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
num_classes = 10
image_size = 784
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_data = training_images.reshape(training_images.shape[0], image_size)
test_data = test_images.reshape(test_images.shape[0], image_size)
training_labels = to_categorical(training_labels, num_classes)
test_labels = to_categorical(test_labels, num_classes)
# A helpful function we'll be using all over the place to plot training information:
def plot_training_history(history, model):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['training', 'validation'], loc='best')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['training', 'validation'], loc='best')
plt.show()
loss, accuracy = model.evaluate(test_data, test_labels, verbose=False)
print(f'Test loss: {loss:.3}')
print(f'Test accuracy: {accuracy:.3}')
```
## Purpose of Layers
Each layer is an opportunity to add *or reduce* complexity. In some cases, enormous complexity is required to accurately model a problem. In other cases, simpler architectures may perform better.
One apt analogy is that a neural network is like an assembly line: each layer adds a piece of the final product, and as our data nears the end, a complete picture emerges.
Another apt analogy is that a neural network is like the game of telephone: every time we repeat the phrase we just heard into the ear of our neighbor, some of the information is lost. By the end, the phrase has changed completely.
Paradoxically, these are both true. Sometimes we want to ignore parts of the input, because they may be irrelevant. Other times, we need to build up complex interactions between different pieces of the input.
One truism, though, is that more complex models always require more training.
## Even Simple Architectures Can Work
Even this simple model will significantly outperform random guessing, and it's fast and easy to train.
```
model = Sequential()
model.add(Dense(units=5, activation='sigmoid', input_shape=(image_size,)))
model.add(Dense(units=num_classes, activation='softmax'))
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(training_data, training_labels, batch_size=128, epochs=50, verbose=False, validation_split=.1)
plot_training_history(history, model)
```
## Too Simple Is Also Possible
Overly simplistic models will always fail. Frankly, it is remarkable that this next network performs as well as it does (nearly twice as well as guessing randomly!), but no amount of training will make this network architecture an accurate classifier.
```
model = Sequential()
model.add(Dense(units=1, activation='sigmoid', input_shape=(image_size,)))
model.add(Dense(units=num_classes, activation='softmax'))
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(training_data, training_labels, batch_size=128, epochs=50, verbose=False, validation_split=.1)
plot_training_history(history, model)
```
## More Parameters Is Often Better
Increasing the number of parameters can often yield better results; in this case we have to train for fewer epochs to achieve a higher accuracy. But keep in mind that we are almost always trading speed and computational power for accuracy.
```
model = Sequential()
model.add(Dense(units=256, activation='sigmoid', input_shape=(image_size,)))
model.add(Dense(units=num_classes, activation='softmax'))
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(training_data, training_labels, batch_size=128, epochs=10, verbose=False, validation_split=.1)
plot_training_history(history, model)
```
## Complexity Allows More Overfitting
While overly simple networks don't have the capacity to capture the phenomenon (underfitting), overly complex networks have enough capacity to "memorize" the training data, which can result in overfitting. Overfitting also tends to become more pronounced as we train networks for more epochs.
```
model = Sequential()
model.add(Dense(units=2048, activation='sigmoid', input_shape=(image_size,)))
model.add(Dense(units=num_classes, activation='softmax'))
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(training_data, training_labels, batch_size=128, epochs=10, verbose=False, validation_split=.1)
plot_training_history(history, model)
```
Notice how after a while our performance on the training data is better than on the validation data. This is a sign that we might have overfit the data: indeed, the model tops out at over 98% on the training data, but drops to 95% on the test data. This model with a 2048-node hidden layer performs a little better than the previous one, which had only 256 nodes in its hidden layer. Let's see what happens when we really get extreme:
```
model = Sequential()
model.add(Dense(units=10000, activation='sigmoid', input_shape=(image_size,)))
model.add(Dense(units=num_classes, activation='softmax'))
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(training_data, training_labels, batch_size=128, epochs=10, verbose=False, validation_split=.1)
plot_training_history(history, model)
```
This network took **much** longer to train. It appears to be overfit as early as the second training epoch, and indeed when we run the test data the result is worse than the training performance. It's worth noting that this is still a pretty good result, though, at 96% test accuracy.
This is an example of the diminishing returns that neural networks sometimes exhibit: it takes a LOT of power, time, and complexity to squeeze out the last few percentage points of accuracy.
## Multiple Layers
Making each layer wider is one way to add complexity, but not the only way. Consider these two architectures:
```
# 10,000 total nodes in hidden layers
wide_model = Sequential()
wide_model.add(Dense(units=10000, activation='sigmoid', input_shape=(image_size,)))
wide_model.add(Dense(units=num_classes, activation='softmax'))
wide_model.summary()
# 9000 total nodes in hidden layers
long_model = Sequential()
long_model.add(Dense(units=3000, activation='sigmoid', input_shape=(image_size,)))
long_model.add(Dense(units=3000, activation='sigmoid'))
long_model.add(Dense(units=3000, activation='sigmoid'))
long_model.add(Dense(units=num_classes, activation='softmax'))
long_model.summary()
```
## Some Things to Note:
The second of these models has roughly 2.5 times as many parameters even though it has 1000 fewer nodes. This means it will take much longer to train.
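You can check the parameter counts directly, since both models above are already built:
```
# Total trainable + non-trainable parameters of each model
print(f"wide_model parameters: {wide_model.count_params():,}")   # about 7.95 million
print(f"long_model parameters: {long_model.count_params():,}")   # about 20.39 million
```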
Additional layers are commonly understood to capture "higher level" features. An analogy that is useful but not exactly mathematically proven is: the first layer recognizes simple features, such as edges; the second layer detects combinations of edges, such as shapes; the third layer detects combinations of shapes... and so on.
To be very clear: this is a helpful mental model, but it should not be treated as a truism or fact. The most literal way to understand this point is that the first layer represents combinations of the raw data points (pixels in this case) with themselves. The second layer is combinations of those combinations. As we go deeper, we are quite literally looking at combinations of combinations of combinations.
Regardless of the interpretation, "Deep Learning" is the study of deep neural networks. State-of-the-art results are being achieved by networks with MANY layers, not by networks with a single hidden layer.
## Let's See How `long_model` Performs...
```
long_model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = long_model.fit(training_data, training_labels, batch_size=128, epochs=5, verbose=False, validation_split=.1)
plot_training_history(history, long_model)
```
Well, it takes FOREVER to train just 5 epochs, but unlike the 10k-node model, it seems to continue improving rather than plateau right away. It also doesn't appear to have overfit the data. Interestingly, it's still underperforming compared to some of the simpler models. It's possible this could change if we train for long enough, but other models are doing more with significantly fewer resources... it's worth thinking about why that might be.
## Thinking About Resolution
In the case of the MNIST digits dataset, the initial input is only 28 by 28 pixels, a total of 784 data points. In the previous `wide_model` and `long_model` we 'increased' the resolution of each data point to 10,000 and 3,000 values per layer respectively.
The strength of neural networks is capturing complex interactions between data points, but scaling 784 values up to 10,000 or 3,000 nodes makes training the network harder and makes the problem faced by the second layer **more** complex than the original problem. Obviously, we were still able to achieve decent performance this way, but consider this:
```
long_model_two = Sequential()
long_model_two.add(Dense(units=256, activation='sigmoid', input_shape=(image_size,)))
long_model_two.add(Dense(units=128, activation='sigmoid'))
long_model_two.add(Dense(units=64, activation='sigmoid'))
long_model_two.add(Dense(units=num_classes, activation='softmax'))
long_model_two.summary()
long_model_two.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = long_model_two.fit(training_data, training_labels, batch_size=128, epochs=50, verbose=False, validation_split=.1)
plot_training_history(history, long_model_two)
```
By reducing the size of the problem at each layer, we make each epoch much faster. In this case, we've also improved accuracy. As in many things, overcomplication is a big problem. On MNIST, it's possible to do better than 95% with a deep neural net, but to really break into the top tier (99% accuracy has been achieved) we need Convolutional Neural Networks, which we'll study later.
### Converting pairwise constraints to labeled samples
Clustering is an unsupervised data analysis technique, in which a dataset is partitioned into a set of clusters, which each consist of a dense region of samples. Unlike classification, what constitutes a good clustering is ambiguous. Semi-supervised or constrained clustering introduces information to this problem, in the form of pairwise constraints. This allows clusterings to be found which better represent the goals of the user.
A constraint is a relationship between a pair of samples. Must-link constraints indicate the two samples are in the same cluster, and cannot-link constraints indicate that they are in different clusters. It is a challenge to find clusterings that follow the natural structure of the dataset, while adhering to constraints. This is partly due to the difficulty of working with pairwise constraints as opposed to labels. Pairwise constraints do have their advantages, however. It is very simple for a human oracle to determine if a pair of samples are in the same or different classes. If there are a large number of classes, or if classes do not have obvious labels (image segmentation), then it is difficult to provide a label.
In this work, we turn a set of pairwise constraints into a set of labeled samples, which can be used to train a classifier. Thus, the very difficult task of constrained clustering is simplified to a classification problem. As a classification problem, new samples can be added to resulting grouping in an online manner. Classifiers are much more efficient than constrained clustering techniques. Active learning and outlier detection are also better suited to the classification domain.
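To make the idea of pairwise constraints concrete, here is a minimal, library-agnostic sketch (the function `sample_constraints` is illustrative only and is not the `robustclust` API used below) of how ML/CL constraints can be derived from ground-truth labels for benchmarking:
```
import numpy as np

def sample_constraints(labels, n_constraints, seed=0):
    """Return an (n_constraints, 3) array of rows (i, j, is_must_link)."""
    rng = np.random.default_rng(seed)
    n = len(labels)
    rows = []
    for _ in range(n_constraints):
        i, j = rng.choice(n, size=2, replace=False)
        # 1 = must-link (same class), 0 = cannot-link (different classes)
        rows.append((i, j, int(labels[i] == labels[j])))
    return np.array(rows)
```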
```
import matplotlib.pyplot as plt
import sklearn.datasets as ds
from robustclust import get_constraints, \
E2CP, \
SpectralLearning, \
plot_constraints
```
First, we make some synthetic data consisting of 2-dimensional Gaussian blobs. We also generate a random set of constraints.
```
n_clusters, N, n_constraints = (3, 1000, 30)
data, labels = ds.make_blobs(n_samples=N,
n_features=2,
centers=n_clusters)
constraint_mat, _ = get_constraints(data, labels, n_constraints=n_constraints)
```
We can plot the data and the constraints. Must-links (ML) are indicated by solid lines, while cannot-links (CL) are represented by dashed lines.
```
plot_constraints(data, labels=labels, constraint_mat=constraint_mat)
```
Now we create a ConstraintsToLabels object, which accepts the data and constraint sets, the two forms of information available in a constrained clustering problem. A call to the fit_constrained() method converts the pairwise constraints into a set of labeled data. The labeled samples are those involved in a constraint.
The method uses unsupervised hierarchical clustering to agglomerate the constrained samples into groups which do not violate any cannot-link constraints; I will refer to these groups as nodes. Between the nodes there are both ML and CL constraints. In the plot below I draw the nodes, overlying the groups of samples they represent, with the population of each group shown in the node. The net constraint values between the nodes (#ML - #CL) are represented by lines of different thickness.
```
cc1 = E2CP(n_clusters=n_clusters, constraint_mat=constraint_mat)
cc1.fit_constrained(data)
plot_constraints(data, labels=cc1.labels)
cc2 = SpectralLearning(n_clusters=n_clusters, constraint_mat=constraint_mat)
cc2.fit_constrained(data)
plot_constraints(data, labels=cc2.labels)
```
This is a special graph cut problem. A good solution joins nodes with a high #ML - #CL value between them. The result of this process is a set of labels (trainLabels) for the set of constrained samples (trainInd).
We plot the very simple classification problem below, which was derived from a complicated mess of pairwise constraints. Note that the number of clusters was not known by the method.
```
plt.figure()
cc.plot_labels(data)
cc.plot_labels(data[trainInd,:], trainLabels)
plt.show()
```
```
%matplotlib inline
import numpy as np
import pandas as pd
from glob import glob
from copy import copy
import scipy.optimize
import scipy.stats
import scipy.signal
import numpy.linalg
import matplotlib.pyplot as plt
sigma_x = 0.8160 # Tobias (confirmed) number # 0.570 # TDR Number
sigma_y = 0.0163 # Tobias (confirmed) number # 0.020 # TDR Number
#sigma_x = 0.01
#sigma_y = 0.01
def sigma_phi(x, y):
r2 = x**2 + y**2
return ((-y / r2) ** 2 * sigma_x ** 2 + (x / r2) ** 2 * sigma_y ** 2) ** 0.5
def phi(x, y):
return np.arctan2(y, x)
def w_phi(x, y):
return 1 / sigma_phi(x, y) ** 2
w_x = 1 / sigma_x ** 2
w_y = 1 / sigma_y ** 2
C = np.diag([sigma_x ** 2, sigma_y ** 2])
Cinv = np.linalg.inv(C)
def space_point_prediction(z, cx, mx, cy, my):
return np.array([
[mx*z + cx],
[my*z + cy],
])
def event_evaluation(mx, cx, my, cy, x, y, z):
predictions = [space_point_prediction(_z, cx, mx, cy, my) for _z in z]
delta = [np.array([[_x], [_y]]) - p for _x, _y, p in zip(x, y, predictions)]
return sum(d.transpose() @ Cinv @ d for d in delta)[0][0]
N = 100000
layer_positions = np.array([50, 100, 150])
cx = scipy.stats.norm.rvs(loc=0, scale=10, size=N)
mx = scipy.stats.norm.rvs(loc=0, scale=0.05, size=N)
cy = scipy.stats.norm.rvs(loc=0, scale=10, size=N)
my = scipy.stats.norm.rvs(loc=0, scale=0.05, size=N)
z = np.zeros(N) # scipy.stats.norm.rvs(loc=0, scale=10, size=N)
mc_truth = pd.DataFrame({
"cx": cx,
"mx": mx,
"cy": cy,
"my": my,
})
# Nominal points on straight line
sp0 = [space_point_prediction(layer_positions[0] + _z, _cx, _mx, _cy, _my) for _z, _cx, _mx, _cy, _my in zip(z, cx, mx, cy, my)]
sp1 = [space_point_prediction(layer_positions[1] + _z, _cx, _mx, _cy, _my) for _z, _cx, _mx, _cy, _my in zip(z, cx, mx, cy, my)]
sp2 = [space_point_prediction(layer_positions[2] + _z, _cx, _mx, _cy, _my) for _z, _cx, _mx, _cy, _my in zip(z, cx, mx, cy, my)]
# Resolution effect
n0 = scipy.stats.multivariate_normal.rvs(mean=np.array([0, 0]), cov=C, size=(N))
n1 = scipy.stats.multivariate_normal.rvs(mean=np.array([0, 0]), cov=C, size=(N))
n2 = scipy.stats.multivariate_normal.rvs(mean=np.array([0, 0]), cov=C, size=(N))
# Only add noise to x and y
position0 = np.array([sp[:, 0] + n.transpose() for sp, n in zip(sp0, n0)])
position1 = np.array([sp[:, 0] + n.transpose() for sp, n in zip(sp1, n1)])
position2 = np.array([sp[:, 0] + n.transpose() for sp, n in zip(sp2, n2)])
mod_sp = pd.DataFrame({
"x0": position0[:, 0],
"x1": position1[:, 0],
"x2": position2[:, 0],
"y0": position0[:, 1],
"y1": position1[:, 1],
"y2": position2[:, 1],
"z0": layer_positions[0] * np.ones(N),
"z1": layer_positions[1] * np.ones(N),
"z2": layer_positions[2] * np.ones(N),
})
mod_sp = pd.concat([mod_sp, mc_truth], axis=1)
# Transform to local coordinates: x' = R^-1 T^-1 x
# Apply misalignment, need a function m(xyz) -> m((xyz), alpha)
# Transform to global x = TRx'
def R(alpha, beta, gamma):
"alpha: rot along x, beta: rot along y, gamma: rot along z"
return np.array([
[1, -gamma, beta, 0],
[gamma, 1, -alpha, 0],
[-beta, alpha, 1, 0],
[0, 0, 0, 1]
])
def T(x, y, z):
return np.array([
[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1],
])
# Given our geometry, the transformation is given by
global_to_layer0 = lambda x: (np.linalg.inv(T(0, 0, 50) @ R(0, 0, 0)) @ np.array([*x, 1]))[:3]
global_to_layer1 = lambda x: (np.linalg.inv(T(0, 0, 100) @ R(0, 0, 0)) @ np.array([*x, 1]))[:3]
global_to_layer2 = lambda x: (np.linalg.inv(T(0, 0, 150) @ R(0, 0, 0)) @ np.array([*x, 1]))[:3]
layer0_to_global = lambda x: (T(0, 0, 50) @ R(0, 0, 0) @ np.array([*x, 1]))[:3]
layer1_to_global = lambda x: (T(0, 0, 100) @ R(0, 0, 0) @ np.array([*x, 1]))[:3]
layer2_to_global = lambda x: (T(0, 0, 150) @ R(0, 0, 0) @ np.array([*x, 1]))[:3]
mod_sp["local_x0"], mod_sp["local_y0"], mod_sp["local_z0"] = zip(*mod_sp[["x0", "y0", "z0"]].apply(global_to_layer0, axis=1))
mod_sp["local_x1"], mod_sp["local_y1"], mod_sp["local_z1"] = zip(*mod_sp[["x1", "y1", "z1"]].apply(global_to_layer1, axis=1))
mod_sp["local_x2"], mod_sp["local_y2"], mod_sp["local_z2"] = zip(*mod_sp[["x2", "y2", "z2"]].apply(global_to_layer2, axis=1))
l0 = np.array([
scipy.stats.norm.rvs(loc=0, scale=0.100),
scipy.stats.norm.rvs(loc=0, scale=0.100),
0,
0,
0,
scipy.stats.norm.rvs(loc=0, scale=2e-3)
])
l1 = np.array([
scipy.stats.norm.rvs(loc=0, scale=0.100),
scipy.stats.norm.rvs(loc=0, scale=0.100),
0,
0,
0,
scipy.stats.norm.rvs(loc=0, scale=2e-3)
])
l2 = np.array([
scipy.stats.norm.rvs(loc=0, scale=0.100),
scipy.stats.norm.rvs(loc=0, scale=0.100),
0,
0,
0,
scipy.stats.norm.rvs(loc=0, scale=2e-3)
])
mod_sp["local_x0"], mod_sp["local_y0"], mod_sp["local_z0"] = zip(*mod_sp[["local_x0", "local_y0", "local_z0"]].apply(
lambda x: (T(*l0[:3]) @ R(*l0[3:]) @ np.array([*x, 1]))[:3], axis=1))
mod_sp["local_x1"], mod_sp["local_y1"], mod_sp["local_z1"] = zip(*mod_sp[["local_x1", "local_y1", "local_z1"]].apply(
lambda x: (T(*l1[:3]) @ R(*l1[3:]) @ np.array([*x, 1]))[:3], axis=1))
mod_sp["local_x2"], mod_sp["local_y2"], mod_sp["local_z2"] = zip(*mod_sp[["local_x2", "local_y2", "local_z2"]].apply(
lambda x: (T(*l2[:3]) @ R(*l2[3:]) @ np.array([*x, 1]))[:3], axis=1))
mod_sp["global_x0"], mod_sp["global_y0"], mod_sp["global_z0"] = zip(*mod_sp[["local_x0", "local_y0", "local_z0"]].apply(layer0_to_global, axis=1))
mod_sp["global_x1"], mod_sp["global_y1"], mod_sp["global_z1"] = zip(*mod_sp[["local_x1", "local_y1", "local_z1"]].apply(layer1_to_global, axis=1))
mod_sp["global_x2"], mod_sp["global_y2"], mod_sp["global_z2"] = zip(*mod_sp[["local_x2", "local_y2", "local_z2"]].apply(layer2_to_global, axis=1))
def f(z, cx, mx, cy, my):
return np.array([mx*z + cx, my*z + cy, z])
def A(z):
return np.block([
[np.vstack([np.ones(len(z)), z]).T, np.zeros((3, 2))],
[np.zeros((3, 2)), np.vstack([np.ones(len(z)), z]).T],
])
def b(x, y):
return np.block([
x, y
])
#p = np.linalg.lstsq(A(test_x, test_y, test_z), b(test_x, test_y), rcond=None)[0]
mod_sp["cx_fit"], mod_sp["mx_fit"], mod_sp["cy_fit"], mod_sp["my_fit"] = zip(*mod_sp[["global_x0", "global_x1", "global_x2", "global_y0", "global_y1", "global_y2", "global_z0", "global_z1", "global_z2"]].apply(
lambda x: np.linalg.lstsq(A(x[6:]), b(x[:3], x[3:6]), rcond=None)[0], axis=1
))
mod_sp[f"residual_x0"], mod_sp[f"residual_y0"] = zip(*(mod_sp[[f"z0", f"cx_fit", f"mx_fit", f"cy_fit", f"my_fit"]].apply(lambda x: f(*x), axis=1).apply(lambda x: global_to_layer0(x)[:2]) - mod_sp[["local_x0", "local_y0"]].apply(lambda x: np.array([*x]), axis=1)))
mod_sp[f"residual_x1"], mod_sp[f"residual_y1"] = zip(*(mod_sp[[f"z1", f"cx_fit", f"mx_fit", f"cy_fit", f"my_fit"]].apply(lambda x: f(*x), axis=1).apply(lambda x: global_to_layer1(x)[:2]) - mod_sp[["local_x1", "local_y1"]].apply(lambda x: np.array([*x]), axis=1)))
mod_sp[f"residual_x2"], mod_sp[f"residual_y2"] = zip(*(mod_sp[[f"z2", f"cx_fit", f"mx_fit", f"cy_fit", f"my_fit"]].apply(lambda x: f(*x), axis=1).apply(lambda x: global_to_layer2(x)[:2]) - mod_sp[["local_x2", "local_y2"]].apply(lambda x: np.array([*x]), axis=1)))
mod_sp[f"chi2"] = mod_sp.residual_x0 ** 2 / sigma_x ** 2 + mod_sp.residual_x1 ** 2 / sigma_x ** 2 + mod_sp.residual_x2 ** 2 / sigma_x ** 2 + mod_sp.residual_y0 ** 2 / sigma_y ** 2 + mod_sp.residual_y1 ** 2 / sigma_y ** 2 + mod_sp.residual_y2 ** 2 / sigma_y ** 2
mod_sp["residual_cx"] = mod_sp.cx - mod_sp.cx_fit
mod_sp["residual_mx"] = mod_sp.mx - mod_sp.mx_fit
mod_sp["residual_cy"] = mod_sp.cy - mod_sp.cy_fit
mod_sp["residual_my"] = mod_sp.my - mod_sp.my_fit
def plot_metrics(df):
print(f'Layer 0 x Residual: ({df[f"residual_x0"].mean() * 1000: .3f} +- {df[f"residual_x0"].std() * 1000: .3f})' + r' $\mu$m')
print(f'Layer 1 x Residual: ({df[f"residual_x1"].mean() * 1000: .3f} +- {df[f"residual_x1"].std() * 1000: .3f})' + r' $\mu$m')
print(f'Layer 2 x Residual: ({df[f"residual_x2"].mean() * 1000: .3f} +- {df[f"residual_x2"].std() * 1000: .3f})' + r' $\mu$m')
print(f'Layer 0 y Residual: ({df[f"residual_y0"].mean() * 1000: .3f} +- {df[f"residual_y0"].std() * 1000: .3f})' + r' $\mu$m')
print(f'Layer 1 y Residual: ({df[f"residual_y1"].mean() * 1000: .3f} +- {df[f"residual_y1"].std() * 1000: .3f})' + r' $\mu$m')
print(f'Layer 2 y Residual: ({df[f"residual_y2"].mean() * 1000: .3f} +- {df[f"residual_y2"].std() * 1000: .3f})' + r' $\mu$m')
print(f'cx Residual: ({df[f"residual_cx"].mean() * 10000: .3f} +- {df[f"residual_cx"].std() * 1000: .3f})' + r' mm')
print(f'mx Residual: ({df[f"residual_mx"].mean() * 10000: .3f} +- {df[f"residual_mx"].std() * 1000: .3f})' + r' ')
print(f'cy Residual: ({df[f"residual_cy"].mean() * 10000: .3f} +- {df[f"residual_cy"].std() * 1000: .3f})' + r' mm')
print(f'my Residual: ({df[f"residual_my"].mean() * 10000: .3f} +- {df[f"residual_my"].std() * 1000: .3f})' + r' ')
plot_residual(df[f"residual_x0"], "Layer 0", "x")
plot_residual(df[f"residual_x1"], "Layer 1", "x")
plot_residual(df[f"residual_x2"], "Layer 2", "x")
plot_residual(df[f"residual_y0"], "Layer 0", "y")
plot_residual(df[f"residual_y1"], "Layer 1", "y")
plot_residual(df[f"residual_y2"], "Layer 2", "y")
plot_chi2(df["chi2"])
plot_residual(df[f"residual_cx"], "Track Parameter", "cx")
plot_residual(df[f"residual_mx"], "Track Parameter", "mx")
plot_residual(df[f"residual_cy"], "Track Parameter", "cy")
plot_residual(df[f"residual_my"], "Track Parameter", "my")
plt.show()
plt.close()
def plot_residual(series, title, axis):
xmin = series.min()
xmax = series.max()
boundary = max(abs(xmin), abs(xmax))
xlim = (-boundary, boundary)
_, edges, _ = plt.hist(series, bins=20, range=xlim, density=True)
tmp = np.linspace(*xlim)
plt.plot(tmp, scipy.stats.norm.pdf(tmp, loc=series.mean(), scale=series.std()),
label=r"$\mu$" + f" = {series.mean() * 1000:.3f}" + r" $\mu$m" + "\n" + r"$\sigma$" + f" = {series.std() * 1000:.3f}" + r" $\mu$m")
plt.title(title)
plt.xlabel(f" {axis} Residual")
plt.ylabel("arb. units")
plt.legend()
plt.show()
plt.close()
def plot_chi2(series):
tmp = np.linspace(0, 20, num=100)
plt.hist(series, bins=50, density=True, range=(0, 50))
plt.plot(tmp, scipy.stats.chi2.pdf(tmp, df=2))
plt.xlabel(r"$\chi^2$")
plt.ylabel(r"arb. units")
plt.show()
plt.close()
plot_metrics(mod_sp)
# Effective weight of the unbiased track-fit residuals:
#   What = (I - A (A^T W A)^-1 A^T W)^T W
def What(z):
_A = A(z)
_W = np.diag([1/sigma_x**2, 1/sigma_x**2, 1/sigma_x**2, 1/sigma_y**2, 1/sigma_y**2, 1/sigma_y**2])
return (np.identity(_A.shape[0]) - _A @ np.linalg.inv(_A.transpose() @ _W @ _A) @ _A.transpose() @ _W).transpose() @ _W
_What = What(np.array([50, 100, 150]))
# Derivative of the per-layer (x, y) residuals with respect to that layer's
# alignment parameters (dx, dy, dgamma), stacked for all three layers
def dr_da(x, y):
drda_x = lambda y: np.array([-1, 0, y])
drda_y = lambda x: np.array([0, -1, -x])
spacer = 3
return np.array([
[*drda_x(y[0]), *np.zeros(spacer), *np.zeros(spacer)],
[*np.zeros(spacer), *drda_x(y[1]), *np.zeros(spacer)],
[*np.zeros(spacer), *np.zeros(spacer), *drda_x(y[2])],
[*drda_y(x[0]), *np.zeros(spacer), *np.zeros(spacer)],
[*np.zeros(spacer), *drda_y(x[1]), *np.zeros(spacer)],
[*np.zeros(spacer), *np.zeros(spacer), *drda_y(x[2])],
])
t = mod_sp[[
"local_x0", "local_x1", "local_x2",
"local_y0", "local_y1", "local_y2",
"residual_x0", "residual_x1", "residual_x2",
"residual_y0", "residual_y1", "residual_y2",
]].apply(
lambda x: dr_da(x[:3], x[3:6]).transpose() @ _What @ x[6:], axis=1).sum()
M = mod_sp[[
"local_x0", "local_x1", "local_x2",
"local_y0", "local_y1", "local_y2",
]].apply(
lambda x: dr_da(x[:3], x[3:6]).transpose() @ _What @ dr_da(x[:3], x[3:6]), axis=1).sum()
# Solve the alignment normal equations M a = -t via an eigen-decomposition
# (pseudo-inverse), so that weak modes can be removed below
w, v = np.linalg.eig(M)
w = np.real(w)
v = np.real(v)
plt.title("Eigenvalues")
plt.plot(range(len(w)), w, marker="o", ls="")
plt.ylim(0, None)
plt.xlabel("Eigenvalue Index")
plt.ylabel("Eigenvalue Magnitude")
#plt.text(3, max(w), "index from 3 to 8 small but nonzero")
plt.savefig("eigenvalues.png")
plt.show()
plt.close()
print(w)
winv = w ** -1
winv[-4:] = 0  # drop the last four eigenvalues (treated as weak modes) in the pseudo-inverse
Minv = v @ np.diag(winv) @ v.transpose()
for m, s in zip(-Minv @ t, Minv.diagonal() ** 0.5):
print(f"{m: 4f} +- {s: 4f}")
a = -Minv @ t
mod_sp["aligned_x0"], mod_sp["aligned_y0"], mod_sp["aligned_z0"] = zip(*mod_sp[["local_x0", "local_y0", "local_z0"]].apply(
lambda x: (T(a[0], a[1], 0) @ R(0, 0, a[2]) @ np.array([*x, 1]))[:3], axis=1).apply(layer0_to_global))
mod_sp["aligned_x1"], mod_sp["aligned_y1"], mod_sp["aligned_z1"] = zip(*mod_sp[["local_x1", "local_y1", "local_z1"]].apply(
lambda x: (T(a[3], a[4], 0) @ R(0, 0, a[5]) @ np.array([*x, 1]))[:3], axis=1).apply(layer1_to_global))
mod_sp["aligned_x2"], mod_sp["aligned_y2"], mod_sp["aligned_z2"] = zip(*mod_sp[["local_x2", "local_y2", "local_z2"]].apply(
lambda x: (T(a[6], a[7], 0) @ R(0, 0, a[8]) @ np.array([*x, 1]))[:3], axis=1).apply(layer2_to_global))
mod_sp["cx_fit"], mod_sp["mx_fit"], mod_sp["cy_fit"], mod_sp["my_fit"] = zip(*mod_sp[["aligned_x0", "aligned_x1", "aligned_x2", "aligned_y0", "aligned_y1", "aligned_y2", "aligned_z0", "aligned_z1", "aligned_z2"]].apply(
lambda x: np.linalg.lstsq(A(x[6:]), b(x[:3], x[3:6]), rcond=None)[0], axis=1
))
mod_sp[f"residual_x0"], mod_sp[f"residual_y0"] = zip(*(mod_sp[[f"z0", f"cx_fit", f"mx_fit", f"cy_fit", f"my_fit"]].apply(lambda x: f(*x), axis=1).apply(lambda x: global_to_layer0(x)[:2]) - mod_sp[["aligned_x0", "aligned_y0"]].apply(lambda x: np.array([*x]), axis=1)))
mod_sp[f"residual_x1"], mod_sp[f"residual_y1"] = zip(*(mod_sp[[f"z1", f"cx_fit", f"mx_fit", f"cy_fit", f"my_fit"]].apply(lambda x: f(*x), axis=1).apply(lambda x: global_to_layer1(x)[:2]) - mod_sp[["aligned_x1", "aligned_y1"]].apply(lambda x: np.array([*x]), axis=1)))
mod_sp[f"residual_x2"], mod_sp[f"residual_y2"] = zip(*(mod_sp[[f"z2", f"cx_fit", f"mx_fit", f"cy_fit", f"my_fit"]].apply(lambda x: f(*x), axis=1).apply(lambda x: global_to_layer2(x)[:2]) - mod_sp[["aligned_x2", "aligned_y2"]].apply(lambda x: np.array([*x]), axis=1)))
mod_sp[f"chi2"] = mod_sp.residual_x0 ** 2 / sigma_x ** 2 + mod_sp.residual_x1 ** 2 / sigma_x ** 2 + mod_sp.residual_x2 ** 2 / sigma_x ** 2 + mod_sp.residual_y0 ** 2 / sigma_y ** 2 + mod_sp.residual_y1 ** 2 / sigma_y ** 2 + mod_sp.residual_y2 ** 2 / sigma_y ** 2
mod_sp["residual_cx"] = mod_sp.cx - mod_sp.cx_fit
mod_sp["residual_mx"] = mod_sp.mx - mod_sp.mx_fit
mod_sp["residual_cy"] = mod_sp.cy - mod_sp.cy_fit
mod_sp["residual_my"] = mod_sp.my - mod_sp.my_fit
plot_metrics(mod_sp)
```
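For reference, the cell above follows the usual linearized least-squares alignment formalism. The sketch below uses the same notation as the code and is meant only as a reading aid, not as the write-up of any particular experiment. Each track is fit by minimizing the residuals $r$ of the measured space points against the straight-line model $Ap$, and the per-layer alignment parameters $a$ ($dx$, $dy$, $d\gamma$) are then obtained from the accumulated normal equations:

$$
\hat W = \left(\mathbb{1} - A\,(A^{T} W A)^{-1} A^{T} W\right)^{T} W, \qquad
t = \sum_{\text{tracks}} \left(\frac{\partial r}{\partial a}\right)^{T} \hat W\, r, \qquad
M = \sum_{\text{tracks}} \left(\frac{\partial r}{\partial a}\right)^{T} \hat W\, \frac{\partial r}{\partial a},
$$

$$
a = -M^{+}\, t,
$$

where $W = \mathrm{diag}(1/\sigma_x^{2}, \ldots, 1/\sigma_y^{2})$ and $M^{+}$ is the pseudo-inverse of $M$ with the weakly constrained eigenmodes (the near-zero eigenvalues plotted above) removed, exactly as done with winv in the code.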
# Electric Machinery Fundamentals 5th edition
## Chapter 3
## Animation: Three-phase fluxes
(based on [Example 3-1](Ch3-Example_3-01.ipynb))
**Calculate the net magnetic field produced by a three-phase stator (adapted for 50 Hz).**
Import the PyLab namespace (provides set of useful commands and constants like $\pi$):
```
%pylab notebook
```
Set up the basic conditions:
```
bmax = 1 # Normalize bmax to 1
freq = 50 # [Hz]
w = 2*pi*freq # [rad/s] angular velocity
```
First, generate the three component magnetic fields:
```
t = linspace(0, 1./50, 100) # 100 values for one period
wt = w*t # we are going to use this quite often
# amplitudes (change them to see the effect of asymmetry):
B_amp = [[1.0], [1.0], [1.0]]
# time variants
B_time = array([sin(wt),
sin(wt-2*pi/3),
sin(wt+2*pi/3)])
# vectorial shifts
B_shift = [[cos(0) + 1j*sin(0)],
[cos(2*pi/3) + 1j*sin(2*pi/3)],
[cos(-2*pi/3) + 1j*sin(-2*pi/3)]]
# all combined
B_ph = B_amp * B_time
B = B_ph * B_shift
```
Calculate total flux vector `Btot`:
```
Btot = B[0] + B[1] + B[2]
```
Calculate a circle representing the expected maximum value of `Btot`:
```
circle = 1.5 * (cos(wt) + 1j*sin(wt))
```
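As a quick cross-check (a sketch of the standard textbook derivation, using the same phase conventions as the code above), summing the three space vectors gives a field of constant magnitude $1.5\,b_{max}$ rotating at angular velocity $\omega$:

$$
\vec B_{net} = \sin(\omega t)\,\angle 0^{\circ} + \sin(\omega t - 120^{\circ})\,\angle 120^{\circ} + \sin(\omega t + 120^{\circ})\,\angle(-120^{\circ})
= 1.5\left[\sin(\omega t) - j\cos(\omega t)\right],
$$

which is exactly the circle of radius 1.5 traced out in the animation below.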
**Generating the animation:**
```
# First set up the figure, the axis, and the plot element we want to animate
from matplotlib import animation
fig = figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.set_title('Space vectors in motion')
ax1.set_xlabel('Real')
ax1.set_ylabel('Imag')
ax1.set_xlim(-1.6, 1.6)
ax1.set_ylim(-1.6, 1.6)
ax1.set_aspect('equal')
ax2.set_title('Sinusoidal three-phase')
ax2.set_xlabel('wt [rad]')
ax2.set_xlim(0, 2*pi)
ax2.set_ylim(-1.6, 1.6)
# set up the different line colours
la, lb, lc, ltot, circ = ax1.plot([], [], 'red',
[], [], 'green',
[], [], 'blue',
[], [], 'magenta',
[], [], 'magenta',
lw=2)
# set up the moving dots
da, db, dc = ax2.plot([], [], 'ro',
[], [], 'go',
[], [], 'bo',
lw=2)
tight_layout() # sometimes useful when sub-plots get a bit crowded
# initialization function: plot the background of each frame
def init():
ax1.plot(real(circle), imag(circle), 'black');
ax2.plot(wt, B_ph[0,], 'red',
wt, B_ph[1,], 'green',
wt, B_ph[2,], 'blue',
lw=1);
return
# animation function. This is called sequentially
def animate(i):
re = [real(B[0,i]), real(B[1,i]), real(B[2,i])]
im = [imag(B[0,i]), imag(B[1,i]), imag(B[2,i])]
la.set_data([0, re[0]], [0, im[0]])
lb.set_data([re[0], re[0]+re[1]], [im[0], im[0]+im[1]])
lc.set_data([re[0]+re[1], re[0]+re[1]+re[2]], [im[0]+im[1], im[0]+im[1]+im[2]])
ltot.set_data([0, real(Btot[i])], [0, imag(Btot[i])])
circ.set_data(real(Btot[:i]),imag(Btot[:i]))
da.set_data(wt[i], B_ph[0,i])
db.set_data(wt[i], B_ph[1,i])
dc.set_data(wt[i], B_ph[2,i])
return la, lb, lc, ltot, da, db, dc
# call the animator:
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=100, interval=50)
```
The animation above might be a bit "skippy" because the browser struggles to keep up with the inline animation.
The solution for a smooth animation is to render the animation as a video and embed it right here:
```
from IPython.display import HTML
HTML(anim.to_html5_video())
```