```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set()
import warnings
warnings.filterwarnings('ignore')
import nltk
import re
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
```
## Data extraction
```
data = pd.read_csv('spam.csv', encoding='latin-1')
data.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
data = data.rename(columns={"v1":"class", "v2":"text"})
data.head()
data['class'].value_counts()
data['word_count'] = data['text'].apply(lambda x: len(x.split(" ")))
data[['text', 'word_count']].head()
def average_word(text):
words = text.split(" ")
return (sum(len(word) for word in words)/len(words))
data['avg_word'] = data['text'].apply(lambda x: average_word(x))
data[['text','avg_word']].head()
print("The average number of words in a document is: {}.".format(np.mean(data['word_count'])))
print("The minimum number of words in a document is: {}.".format(min(data['word_count'])))
print("The maximum number of words in a document is: {}.".format(max(data['word_count'])))
fig, ax = plt.subplots(figsize=(15, 6))
ax.set_title('Distribution of number of words', fontsize = 16)
ax.set_xlabel('Number of words')
sns.distplot(data['word_count'], ax=ax, bins=100, color='g');
print('The number of SMS messages in the dataset: {}'.format(len(data)))
```
## Preprocessing
```
stopwords = nltk.corpus.stopwords.words('english')
```
### Tokenization
```
def get_tokens(text):
    tokens = nltk.word_tokenize(text.lower())
    # strip punctuation from each token and drop stopwords
    replaced = [re.sub(r'[^0-9\w\s]', '', token) for token in tokens
                if token not in stopwords]
    clean_token = list(filter(lambda token: token, replaced))
    return clean_token
data['tokens'] = data['text'].apply(get_tokens)
data['tokens'].head()
```
### Stemming
```
stemmer = nltk.stem.SnowballStemmer('english')
def stemming(text):
return [stemmer.stem(x) for x in text]
data['stemming'] = data['tokens'].apply(stemming)
data['stemming'].head()
```
### Visualization
```
token_dict = Counter(np.concatenate(data['stemming'].values))
words = pd.DataFrame.from_dict(token_dict, orient='index')
words.rename(columns={0: 'count'}, inplace=True)
words.sort_values('count', ascending=False, inplace=True)
```
#### Word frequencies
```
def word_freq(data, n_words=50):
fig, ax = plt.subplots(figsize=(20, 5))
sns.barplot(x=np.arange(n_words), y=data['count'].values[:n_words], ax=ax)
ax.set_xticks(np.arange(n_words))
ax.set_xticklabels(data.index[:n_words], fontsize=14, rotation=90)
return ax
ax = word_freq(words)
ax.set_title("Word Frequencies", fontsize=16);
```
## Vectorization
```
X = [" ".join(x) for x in data['stemming']]
X[0]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(X)
y = data['class'].map({'ham': 0, 'spam': 1})
y.shape
X.shape
```
## Training
```
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.1, random_state=17, shuffle=True)
X_train.shape
y_test.shape
```
# Logistic Regression
```
def plot_scores(optimizer):
    # grid_scores_ was removed in scikit-learn 0.20; cv_results_ exposes the same information
    results = optimizer.cv_results_
    scores = np.array([[params['C'], score] for params, score in
                       zip(results['params'], results['mean_test_score'])])
    fig = plt.figure(figsize=(10, 5))
    plt.plot(scores[:, 0], scores[:, 1], alpha=0.3)
    plt.xlabel("C parameter")
    plt.ylabel("Mean CV score")
    plt.title("Mean score over different C regularization parameters")
    plt.show()
clf = LogisticRegression(random_state=17)
parameters = {'C':range(1, 11)}
grid_search_lg = GridSearchCV(clf, parameters, cv=5)
grid_search_lg.fit(X_train, y_train)
plot_scores(grid_search_lg)
grid_search_lg.best_estimator_
print("Accuracy during cross-validation: {} ".format(grid_search_lg.best_score_))
y_pred = grid_search_lg.predict(X_test)
print('Accuracy on test set: {}'.format(accuracy_score(y_test, y_pred)))
```
### Example
```
def get_vector(text):
x = get_tokens(text)
x = stemming(x)
x = " ".join(x)
return vectorizer.transform([x])
sample = "FreeMsg: Txt: claim your reward of 3 hours talk time"
sample = get_vector(sample)
pred = grid_search_lg.predict(sample)
print("Result is {}".format(pred[0]))
sample2 = "Have you visited the last lecture on physics?"
sample2 = get_vector(sample2)
pred = grid_search_lg.predict(sample2)
print("Result is {}".format(pred[0]))
sample3 = "Have you visited the last lecture on physics? Just buy this book and you will have all materials! Only 99$"
sample3 = get_vector(sample3)
pred = grid_search_lg.predict(sample3)
print("Result is {}".format(pred[0]))
sample4 = "Only 99$"
sample4 = get_vector(sample4)
pred = grid_search_lg.predict(sample4)
print("Result is {}".format(pred[0]))
```
## MultinomialNB
```
clf = MultinomialNB()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy on test set: {}".format(accuracy_score(y_test, y_pred)))
```
## Improving the model
### n-grams
## 1-grams
```
X = [" ".join(x) for x in data['stemming']]
ngramm_vectorizer = CountVectorizer()
X = ngramm_vectorizer.fit_transform(X)
X_train_one, X_test_one, y_train_one, y_test_one = train_test_split(X, y, stratify=y, test_size=0.1, random_state=17, shuffle=True)
```
## Naive Bayes
```
clf = MultinomialNB()
clf.fit(X_train_one, y_train_one)
y_pred = clf.predict(X_test_one)
print("Accuracy on test set for naive-bayes: {}".format(accuracy_score(y_test_one, y_pred)))
```
## Logistic Regression Grid Search
```
clf = LogisticRegression(random_state=17)
parameters = {'C':range(1, 11)}
grid_search_lg = GridSearchCV(clf, parameters, cv=5)
grid_search_lg.fit(X_train_one, y_train_one)
plot_scores(grid_search_lg)
grid_search_lg.best_estimator_
print("Accuracy during cross-validation: {} ".format(grid_search_lg.best_score_))
y_pred_need = grid_search_lg.predict(X_test_one)
print('Accuracy on test set for logistic regression: {}'.format(accuracy_score(y_test_one, y_pred_need)))
scores = []
scores.append(accuracy_score(y_test_one, y_pred_need))
```
## 2-grams
```
X = [" ".join(x) for x in data['stemming']]
twogramm_vectorizer = CountVectorizer(ngram_range=(1,2))
X = twogramm_vectorizer.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.1, random_state=17, shuffle=True)
clf = LogisticRegression(random_state=17)
parameters = {'C':range(1, 10)}
grid_search_lg = GridSearchCV(clf, parameters, cv=5)
grid_search_lg.fit(X_train, y_train)
plot_scores(grid_search_lg)
grid_search_lg.best_estimator_
print("Accuracy during cross-validation: {} ".format(grid_search_lg.best_score_))
y_pred = grid_search_lg.predict(X_test)
print('Accuracy on test set for logistic regression: {}'.format(accuracy_score(y_test, y_pred)))
scores.append(accuracy_score(y_test, y_pred))
```
## 3-grams
```
X = [" ".join(x) for x in data['stemming']]
twogramm_vectorizer = CountVectorizer(ngram_range=(1,3))
X = twogramm_vectorizer.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.1, random_state=17, shuffle=True)
clf = LogisticRegression(random_state=17)
parameters = {'C':range(1, 10)}
grid_search_lg = GridSearchCV(clf, parameters, cv=5)
grid_search_lg.fit(X_train, y_train)
plot_scores(grid_search_lg)
print("Accuracy during cross-validation: {} ".format(grid_search_lg.best_score_))
y_pred = grid_search_lg.predict(X_test)
print('Accuracy on test set for logistic regression: {}'.format(accuracy_score(y_test, y_pred)))
scores.append(accuracy_score(y_test, y_pred))
n = [1, 2, 3]
plt.plot(n, scores)
plt.xlabel('n-grams')
plt.ylabel('score on test set')
plt.title('accuracy on different n-grams');
```
## Conclusion: in our case unigram features work better than higher-order n-grams (logistic regression with C=9)
```
print(classification_report(y_test_one, y_pred_need, target_names=['ham', 'spam']))
```
```
from google.colab import drive
drive.mount('/gdrive')
%cd /gdrive/
%cd /gdrive/Shareddrives/BA840
import pandas as pd
import numpy as np
ls
db = pd.read_csv("database.csv")
data = pd.read_csv("database.csv")
db.head()
dt = db.copy()
dt.info()
dt.describe()
cs = dt[dt["Crime Solved"] == "Yes"]
cs.head()
cs['Perpetrator Age'] = cs['Perpetrator Age'].astype("int32")
cs.describe()
cs.groupby("Perpetrator Sex").mean()[["Perpetrator Age","Victim Age"]]
cs.groupby("Perpetrator Sex").count()[["Perpetrator Age"]]
cs.groupby("Perpetrator Race").describe()[["Perpetrator Age","Victim Age"]]
cs.groupby("Relationship").count()[["Incident"]]
data = data[data['Crime Solved'] == 'Yes']
cols_to_drop = ['Crime Solved']  # assumption: no columns were marked for dropping earlier in the notebook
data['Perpetrator Age'] = data['Perpetrator Age'].astype("int64")
data['Perpetrator Age category'] = np.where(data['Perpetrator Age'] > 64, 'Elder', np.where(data['Perpetrator Age'] < 25, 'Young', 'Adult'))
Y_columns = ['Perpetrator Sex', 'Perpetrator Race', 'Perpetrator Age category']
ignore_columns = ['Crime Solved']
cat_columns = []
num_columns = []
for col in data.columns.values:
if col in Y_columns+ignore_columns:
continue
elif data[col].dtypes == 'int64':
num_columns += [col]
else:
cat_columns += [col]
def handle_missing_values(data, median_val):
    # fill numeric columns with their median and everything else with an explicit placeholder
    df = data.copy()
    for col in df:
        if col in median_val.index.values:
            df[col] = df[col].fillna(median_val[col])
        else:
            df[col] = df[col].fillna("Missing value")
    return df
median_val = pd.Series(dtype=float)
for col in num_columns:
    if col not in cols_to_drop:
        median_val[col] = data[col].median()
data = handle_missing_values(data, median_val)
data.drop(cols_to_drop, axis=1, inplace=True)
categorical_features = cat_columns + ['Perpetrator Sex', 'Perpetrator Race', 'Perpetrator Age category']
# categorical_features = categorical_features
categorical_features_idx = [np.where(data.columns.values == col)[0][0] for col in categorical_features]
del cat_columns
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
data_encoded = data.copy()
categorical_names = {}
encoders = {}
# Use Label Encoder for categorical columns (including target column)
for feature in categorical_features:
le = LabelEncoder()
le.fit(data_encoded[feature])
data_encoded[feature] = le.transform(data_encoded[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
numerical_features = [c for c in data.columns.values if c not in categorical_features]
for feature in numerical_features:
val = data_encoded[feature].values[:, np.newaxis]
mms = MinMaxScaler().fit(val)
data_encoded[feature] = mms.transform(val)
encoders[feature] = mms
data_encoded = data_encoded.astype(float)
del num_columns
def decode_dataset(data, encoders, numerical_features, categorical_features):
df = data.copy()
for feat in df.columns.values:
if feat in numerical_features:
df[feat] = encoders[feat].inverse_transform(np.array(df[feat]).reshape(-1, 1))
for feat in categorical_features:
df[feat] = encoders[feat].inverse_transform(df[feat].astype(int))
return df
decode_dataset(data_encoded, encoders=encoders, numerical_features=numerical_features, categorical_features=categorical_features).head()
data_perp_sex = data_encoded.drop(['Perpetrator Race','Perpetrator Age category','Perpetrator Age'], axis=1)
privileged_sex = np.where(categorical_names['Victim Sex'] == 'Male')[0]
privileged_race = np.where(categorical_names['Victim Race'] == 'White')[0]
%pip install aif360
from aif360.datasets import StandardDataset
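# Wrap the encoded data in an aif360 StandardDataset: the label is 'Perpetrator Sex' and the
# victim's sex and race are treated as protected attributes with the privileged values found above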
data_orig_sex = StandardDataset(data_perp_sex,
label_name='Perpetrator Sex',
favorable_classes=[1],
protected_attribute_names=['Victim Sex', 'Victim Race'],
privileged_classes=[privileged_sex, privileged_race])
def meta_data(dataset):
# print out some labels, names, etc.
display(Markdown("#### Dataset shape"))
print(dataset.features.shape)
display(Markdown("#### Favorable and unfavorable labels"))
print(dataset.favorable_label, dataset.unfavorable_label)
display(Markdown("#### Protected attribute names"))
print(dataset.protected_attribute_names)
display(Markdown("#### Privileged and unprivileged protected attribute values"))
print(dataset.privileged_protected_attributes, dataset.unprivileged_protected_attributes)
display(Markdown("#### Dataset feature names"))
print(dataset.feature_names)
from IPython.display import Markdown, display
meta_data(data_orig_sex)
np.random.seed(42)
data_orig_sex_train, data_orig_sex_test = data_orig_sex.split([0.7], shuffle=True)
display(Markdown("#### Train Dataset shape"))
print("Perpetrator Sex :",data_orig_sex_train.features.shape)
display(Markdown("#### Test Dataset shape"))
print("Perpetrator Sex :",data_orig_sex_test.features.shape)
from sklearn.ensemble import RandomForestClassifier
rf_orig_sex = RandomForestClassifier().fit(data_orig_sex_train.features,
data_orig_sex_train.labels.ravel(),
sample_weight=data_orig_sex_train.instance_weights)
X_test_sex = data_orig_sex_test.features
y_test_sex = data_orig_sex_test.labels.ravel()
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns
def get_model_performance(X_test, y_true, y_pred, probs):
accuracy = accuracy_score(y_true, y_pred)
matrix = confusion_matrix(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_true, preds)
roc_auc = auc(fpr, tpr)
return accuracy, matrix, f1, fpr, tpr, roc_auc
def plot_model_performance(model, X_test, y_true):
y_pred = model.predict(X_test)
probs = model.predict_proba(X_test)
accuracy, matrix, f1, fpr, tpr, roc_auc = get_model_performance(X_test, y_true, y_pred, probs)
display(Markdown('#### Accuracy of the model :'))
print(accuracy)
display(Markdown('#### F1 score of the model :'))
print(f1)
fig = plt.figure(figsize=(15, 6))
ax = fig.add_subplot(1, 2, 1)
sns.heatmap(matrix, annot=True, cmap='Blues', fmt='g')
plt.title('Confusion Matrix')
ax = fig.add_subplot(1, 2, 2)
lw = 2
plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic curve')
plt.legend(loc="lower right")
plot_model_performance(rf_orig_sex, data_orig_sex_test.features, y_test_sex)
```
## Fairness
```
import random
random.randint(1,5)
```
This is a classification example showing how to use Oboe for training and testing in an AutoML context: pipeline selection is performed on the training set, and the performance of the selected model is then evaluated on the test set.
```
method = 'tensoroboe' # 'Oboe' or 'TensorOboe'
problem_type = 'classification'
from oboe import AutoLearner, error # This may take around 15 seconds at first run.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import time
data = load_iris()
x = np.array(data['data'])
y = np.array(data['target'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# disable warnings
import warnings
warnings.filterwarnings('ignore')
```
# Example 1: a no-brainer use
The default `TensorOboe` running mode is `warm`, which means the meta-training is warm-started with a pre-imputed error tensor.
```
# initialize the autolearner class
m = AutoLearner(p_type='classification', runtime_limit=50, method=method, verbose=True)
# fit autolearner on training set and record runtime
start = time.time()
m.fit(x_train, y_train, categorical=None) # TensorOboe accepts the list of feature types
elapsed_time = time.time() - start
# use the fitted autolearner for prediction on test set
y_predicted = m.predict(x_test)
print("prediction error: {}".format(error(y_test, y_predicted, 'classification')))
print("elapsed time: {}".format(elapsed_time))
# get names of the selected machine learning models
m.get_models()
```
# Example 2: build an ensemble of models with given configurations
```
#experimental settings
VERBOSE = False #whether to print out information indicating current fitting progress
N_CORES = 1 #number of cores
RUNTIME_BUDGET = 15
# #optional: limit the types of algorithms (not yet supported)
# s = ['AB', 'ExtraTrees', 'GNB', 'KNN', 'RF', 'DT']
#autolearner arguments
autolearner_kwargs = {
'p_type': 'classification',
'method': method,
'runtime_limit': RUNTIME_BUDGET,
'verbose': VERBOSE,
'selection_method': 'min_variance',
'stacking_alg': 'greedy',
'n_cores': N_CORES,
'build_ensemble': True,
}
# initialize the autolearner class
m = AutoLearner(**autolearner_kwargs)
# fit autolearner on training set and record runtime
start = time.time()
m.fit(x_train, y_train, categorical=None)
elapsed_time = time.time() - start
# use the fitted autolearner for prediction on test set
y_predicted = m.predict(x_test)
print("prediction error: {}".format(error(y_test, y_predicted, 'classification')))
print("elapsed time: {}".format(elapsed_time))
# get names of the selected machine learning models
m.get_models()
```
# Comprehensive Plotting How-To
```
import qcodes as qc
```
Plotting data in QCoDeS can be done using either MatPlot or QTPlot, with matplotlib and pyqtgraph as backends, respectively.
MatPlot and QTPlot tailor these plotting backends to QCoDeS, providing many features.
For example, when plotting a DataArray in a DataSet, the corresponding ticks, labels, etc. are automatically added to the plot.
Both MatPlot and QTPlot support live plotting while a measurement is running.
One of the main differences between the two backends is that matplotlib is more strongly integrated with Jupyter Notebook, while pyqtgraph uses the PyQt GUI.
For matplotlib, this has the advantage that plots can be displayed within a notebook (though it also has a GUI).
The advantage of pyqtgraph is that it can be easily embedded in PyQt GUIs.
This notebook provides a detailed guide on how to use each of the two plotting tools.
```
loc_provider = qc.data.location.FormatLocation(fmt='data/{date}/#{counter}_{name}_{time}')
qc.data.data_set.DataSet.location_provider=loc_provider
```
## MatPlot
The QCoDeS MatPlot relies on the matplotlib package, which is quite similar to Matlab's plotting tools.
It integrates nicely with Jupyter notebook, and as a result, interactive plots can be displayed within a notebook using the following command:
```
%matplotlib notebook
```
### Simple 1D sweep
As a first example, we perform a simple 1D sweep.
We create two trivial parameters, one for measuring a value, and the other for sweeping the value of the measured parameter.
```
p_measure = qc.ManualParameter(name='measured_val')
p_sweep = qc.Parameter(name='sweep_val', set_cmd=p_measure.set)
```
Next we perform a measurement, and attach the `update` method of the `plot` object to the loop, resulting in live plotting.
Note that the resulting plot automatically has the correct x values and labels.
```
loop = qc.Loop(
p_sweep.sweep(0, 20, step=1), delay=0.05).each(
p_measure)
data = loop.get_data_set(name='test_plotting_1D')
# Create plot for measured data
plot = qc.MatPlot(data.measured_val)
# Attach updating of plot to loop
loop.with_bg_task(plot.update)
loop.run();
```
### Subplots
In a measurement, there is often more than a single parameter that is measured.
MatPlot supports multiple subplots, and upon initialization it will create a subplot for each of the arguments it receives.
Let us create a second parameter that, when measured, always returns the value 10.
```
p_measure2 = qc.ManualParameter(name='measured_val_2', initial_value=10)
```
In the example below, three arguments are provided, resulting in three subplots.
By default, subplots will be placed as columns on a single row, up to three columns.
After this, a new row will be created (can be overridden in `MatPlot.max_subplot_columns`).
Multiple DataArrays can also be plotted in a single subplot by passing them as a list in a single arg.
As an example, notice how the first subplot shows multiple values.
```
loop = qc.Loop(
p_sweep.sweep(0, 20, step=1), delay=0.05).each(
p_measure,
p_measure2)
data = loop.get_data_set(name='test_plotting_1D_2')
# Create plot for measured data
plot = qc.MatPlot([data.measured_val, data.measured_val_2], data.measured_val, data.measured_val_2)
# Attach updating of plot to loop
loop.with_bg_task(plot.update)
loop.run();
```
The data arrays don't all have to be passed along during initialization of the MatPlot instance.
We can access the subplots of the plot object as if the plot was a list (e.g. `plot[0]` would give you the first subplot).
To illustrate this, the example below results in the same plot as above.
```
loop = qc.Loop(
p_sweep.sweep(0, 20, step=1), delay=0.05).each(
p_measure,
p_measure2)
data = loop.get_data_set(name='test_plotting_1D_3')
# Create plot for measured data
plot = qc.MatPlot(subplots=3)
plot[0].add(data.measured_val)
plot[0].add(data.measured_val_2)
plot[1].add(data.measured_val)
plot[2].add(data.measured_val_2)
# Attach updating of plot to loop
loop.with_bg_task(plot.update)
loop.run();
```
Note that we passed the kwarg `subplots=3` to specify that we need 3 subplots.
the `subplots` kwarg can be either an int or a tuple.
If it is an int, the subplots will be arranged in rows of at most three columns.
If a tuple is provided, its first element indicates the number of rows, and the second the number of columns.
Furthermore, the size of the figure is automatically computed based on the number of subplots.
This can be overridden by passing the kwarg `figsize=(x_length, y_length)` upon initialization.
Additionally, `MatPlot.default_figsize` can be overridden to change the default computed figsize for a given subplot dimensionality.
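As a minimal sketch of these options (reusing the `qc` import and `data` from the cells above; the grid shape and figure size are just illustrative):
```
plot = qc.MatPlot(subplots=(2, 2), figsize=(10, 8))  # 2 rows x 2 columns, explicit size in inches
plot[0].add(data.measured_val)                       # fill individual subplots as before
plot[1].add(data.measured_val_2)
```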
### 2D Plots
As illustrated below, MatPlot can also plot two-dimensional data arrays.
MatPlot automatically handles setting the appropriate x- and y-axes, and also adds a colorbar by default.
Note that we can also plot the individual traces of a 2D array, as shown in the first subplot below.
This is done by passing all the elements (=rows) of the 2D array as a single argument using the splat (*) operator.
```
p_sweep2 = qc.Parameter(name='sweep_val_2', set_cmd=p_measure2.set)
loop = qc.Loop(
p_sweep.sweep(0, 20, step=1), delay=0.05).loop(
p_sweep2.sweep(0, 10, step=1), delay=0.01).each(
p_measure)
data = loop.get_data_set(name='test_plotting_2D')
# Create plot for measured data
plot = qc.MatPlot([*data.measured_val], data.measured_val)
# Attach updating of plot to loop
loop.with_bg_task(plot.update)
loop.run();
```
In the example above, the colorbar can be accessed via `plot[1].qcodes_colorbar`.
This can be useful when you want to modify the colorbar (e.g. change the color limits `clim`).
Note that the above plot was updated every time an inner loop was completed.
This is because the update method was attached to the outer loop.
If you instead want the plot to update during the inner loop (i.e. after every point), you have to attach it to the inner loop: `loop[0].with_bg_task(plot.update)` (`loop[0]` is the first action of the outer loop, which is the inner loop).
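A rough sketch of adjusting the color limits (the limits below are arbitrary, and `mappable.set_clim` is plain matplotlib rather than a QCoDeS-specific call):
```
cbar = plot[1].qcodes_colorbar   # colorbar attached to the 2D subplot
cbar.mappable.set_clim(0, 10)    # illustrative color limits
plot.update()                    # redraw so the new limits take effect
```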
### Interfacing with Matplotlib
As Matplot is built directly on top of Matplotlib, you can use standard Matplotlib functions which are readily available online in Matplotlib documentation as well as StackOverflow and similar sites. Here, we first perform the same measurement and obtain the corresponding figure:
```
loop = qc.Loop(
p_sweep.sweep(0, 20, step=1), delay=0.05).loop(
p_sweep2.sweep(0, 10, step=1), delay=0.01).each(
p_measure)
data = loop.get_data_set(name='test_plotting_2D_2')
# Create plot for measured data
plot = qc.MatPlot([*data.measured_val], data.measured_val)
# Attach updating of plot to loop
loop.with_bg_task(plot.update)
loop.run();
```
To use the matplotlib API, we need access to the matplotlib Figure and Axis objects.
Each subplot has its corresponding Axis object, and these are grouped together into a single Figure object.
A subplot Axis can be accessed via its index. As an example, we will modify the title of the first axis:
```
ax = plot[0] # shorthand for plot.subplots[0]
ax.set_title("My left subplot title");
```
Note that this returns the actual matplotlib Axis object.
It does have the additional QCoDeS method `Axis.add()`, which makes it easy to add a QCoDeS DataArray. See http://matplotlib.org/api/axes_api.html for documentation of the Matplotlib Axes class.
The Matplotlib Figure object can be accessed via the fig attribute on the QCoDeS Matplot object:
```
fig = plot.fig
fig.tight_layout();
```
See http://matplotlib.org/api/figure_api.html for documentation of the Matplotlib Figure class.
Matplotlib also offers a second way to modify plots, namely pyplot.
This can be imported via:
```
from matplotlib import pyplot as plt
```
In pyplot, there is always an active axis and figure, similar to Matlab plotting.
Every time a new plot is created, it will update the active axis and figure.
The active figure and axis can be changed via `plt.figure(fig.number)` and `plt.sca(ax)`, respectively.
As an example, the following code will change the title of the last-created plot (the right subplot of the previous figure)
```
plt.title('My right subplot title');
```
See https://matplotlib.org/users/pyplot_tutorial.html for documentation on Pyplot
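For example, a small sketch of redirecting subsequent pyplot calls to the first subplot of the figure above (reusing the `plot` object from the earlier cells):
```
plt.sca(plot[0])         # make the left subplot the active axes
plt.xlabel('sweep_val')  # pyplot calls now act on that subplot
```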
### Event handling
Since matplotlib is an interactive plotting tool, one can program actions that are dependent on events.
There are many events, such as clicking on a plot, pressing a key, etc.
As an example, we can attach a trivial function that runs when the plot is closed. You can replace this with other functionality, such as stopping the loop.
```
def handle_close(event):
print('Plot closed')
plot = qc.MatPlot()
plot.fig.canvas.mpl_connect('close_event', handle_close);
```
On a related note, matplotlib also has widgets that can be added to plots, allowing additional interactivity with the dataset.
An example would be adding a slider to show 2D plots of a 3D dataset (e.g. https://matplotlib.org/examples/widgets/slider_demo.html).
## QTPlot
To be written
# Train a text labeler
The [Hugging Face Model Hub](https://huggingface.co/models) has a wide range of models that can handle many tasks. While these models perform well, the best performance often is found when fine-tuning a model with task-specific data.
Hugging Face provides a [number of full-featured examples](https://github.com/huggingface/transformers/tree/master/examples) available to assist with training task-specific models. When building models from the command line, these scripts are a great way to get started.
txtai provides a training pipeline that can be used to train new models programmatically using the Transformers Trainer framework. The training pipeline supports the following:
- Building transient models without requiring an output directory
- Load training data from Hugging Face datasets, pandas DataFrames and lists of dicts
- Text sequence classification tasks (single/multi label classification and regression) including all GLUE tasks
- All training arguments
This notebook shows examples of how to use txtai to train/fine-tune new models.
# Install dependencies
Install `txtai` and all dependencies.
```
%%capture
!pip install git+https://github.com/neuml/txtai datasets pandas
```
# Train a model
Let's get right to it! The following example fine-tunes a tiny Bert model with the sst2 dataset.
The trainer pipeline is basically a one-liner that fine-tunes any text classification/regression model available (locally and/or from the HF Hub).
```
from datasets import load_dataset
from txtai.pipeline import HFTrainer
trainer = HFTrainer()
# Hugging Face dataset
ds = load_dataset("glue", "sst2")
model, tokenizer = trainer("google/bert_uncased_L-2_H-128_A-2", ds["train"], columns=("sentence", "label"))
```
The default trainer pipeline functionality will not store any logs, checkpoints or models to disk. The trainer can take any of the standard TrainingArguments to enable persistent models.
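For example, assuming (as stated above) that the standard TrainingArguments are accepted as keyword arguments, persistence could be enabled roughly like this (the output paths are placeholders):
```
# Hypothetical sketch: forward TrainingArguments to keep checkpoints and logs on disk
model, tokenizer = trainer(
    "google/bert_uncased_L-2_H-128_A-2",
    ds["train"],
    columns=("sentence", "label"),
    output_dir="bert-sst2",        # placeholder output directory
    save_strategy="epoch",
    logging_dir="bert-sst2/logs",
)
```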
The next section creates a Labels pipeline using the newly built model and runs the model against the sst2 validation set.
```
from txtai.pipeline import Labels
labels = Labels((model, tokenizer), dynamic=False)
# Determine accuracy on validation set
results = [row["label"] == labels(row["sentence"])[0][0] for row in ds["validation"]]
sum(results) / len(ds["validation"])
```
81.88% accuracy - not bad for a tiny Bert model.
# Train a model with Lists
As mentioned earlier, the trainer pipeline supports Hugging Face datasets, pandas DataFrames and lists of dicts. The example below trains a model using lists.
```
data = [{"text": "This is a test sentence", "label": 0}, {"text": "This is not a test", "label": 1}]
model, tokenizer = trainer("google/bert_uncased_L-2_H-128_A-2", data)
```
# Train a model with DataFrames
The next section builds a new model using data stored in a pandas DataFrame.
```
import pandas as pd
df = pd.DataFrame(data)
model, tokenizer = trainer("google/bert_uncased_L-2_H-128_A-2", df)
```
# Train a regression model
The previous models were classification tasks. The following model trains a sentence similarity model with a regression output per sentence pair between 0 (dissimilar) and 1 (similar).
```
ds = load_dataset("glue", "stsb")
model, tokenizer = trainer("google/bert_uncased_L-2_H-128_A-2", ds["train"], columns=("sentence1", "sentence2", "label"))
labels = Labels((model, tokenizer), dynamic=False)
labels([("Sailing to the arctic", "Dogs and cats don't get along"),
("Walking down the road", "Walking down the street")])
```
# **SVR with Normalize & PowerTransformer**
This code template is for regression analysis using SVR, where the features are first rescaled with a Normalizer and then transformed with a PowerTransformer inside a pipeline.
### **Required Packages**
```
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer, PowerTransformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.svm import SVR
warnings.filterwarnings('ignore')
```
### **Initialization**
Filepath of CSV file
```
file_path= ""
```
List of features required for model training.
```
features = []
```
Target feature for prediction.
```
target = ''
```
### **Dataset Overview**
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
```
df=pd.read_csv(file_path)
df.head()
```
### **Feature Selection**
Feature selection is the process of reducing the number of input variables when developing a predictive model. It is used both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X=df[features]
Y=df[target]
```
### **Data Preprocessing**
Since most machine learning models in the scikit-learn library don't handle string categories or null values directly, we have to explicitly remove or replace them. The snippet below defines functions that fill null values where they exist and convert string-class columns into dummy (one-hot) columns.
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
#### **Correlation Map**
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
## **Data Splitting**
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
### Data Scaling
Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1, l2 or inf) equals one.
[For more Reference](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Normalizer.html)
```
nz = Normalizer()
x_train = nz.fit_transform(x_train)
x_test = nz.transform(x_test)
```
### **Power Transformer**
Apply a power transform featurewise to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired.
[For more Reference](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html)
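As a small, self-contained sketch of the effect (illustrative synthetic data, not this dataset):
```
from sklearn.preprocessing import PowerTransformer
import numpy as np

skewed = np.random.exponential(scale=2.0, size=(1000, 1))  # heavily right-skewed feature
pt = PowerTransformer()                                     # Yeo-Johnson by default, with standardization
gaussian_like = pt.fit_transform(skewed)
print(gaussian_like.mean(), gaussian_like.std())            # roughly 0 and 1 after the transform
```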
### **Model**
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for a given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies the inputted new cases based on the hyperplane. In 2-Dimensional space, this hyperplane is a line separating a plane into two segments where each class or group occupied on either side.
Here we will use SVR; the implementation is based on libsvm. The fit time scales at least quadratically with the number of samples and may be impractical beyond tens of thousands of samples.
**Model Tuning Parameters**
> C : float, default=1.0 -> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. The penalty is a squared l2 penalty..
> kernel : {‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’}, default=’rbf’ -> Specifies the kernel type to be used in the algorithm. It must be one of ‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’ or a callable. If none is given, ‘rbf’ will be used. If a callable is given it is used to pre-compute the kernel matrix from data matrices; that matrix should be an array of shape (n_samples, n_samples).
> gamma : {‘scale’, ‘auto’} or float, default=’scale’ -> Gamma is a hyperparameter that we have to set before training the model; it decides how much curvature we want in the decision boundary.
> degree : int, default=3 -> Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels. Using degree 1 is similar to using a linear kernel, and increasing the degree parameter leads to longer training times.
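For illustration, these parameters could be passed explicitly inside the pipeline; the values below are simply scikit-learn's defaults, not tuned for this data:
```
# Illustrative only: SVR with its default hyperparameters spelled out
model = make_pipeline(PowerTransformer(), SVR(C=1.0, kernel='rbf', gamma='scale', degree=3))
```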
```
model = make_pipeline(PowerTransformer(),SVR())
model.fit(x_train,y_train)
```
#### **Model Accuracy**
We will use the trained model to make predictions on the test set, then use the predicted values to measure the accuracy of our model.
score: The score function returns the coefficient of determination R2 of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
> r2_score: The r2_score function computes the fraction of variance in the target that is explained by the model.
> mae: The mean absolute error function measures the average absolute difference between the true values and the model's predictions.
> mse: The mean squared error function averages the squared errors, penalizing the model more heavily for large errors.
```
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```
#### **Prediction Plot**
> To visualize the fit, we plot the first 20 actual test-set values against their record number and overlay the model's predictions for the same records.
```
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Jay Shimpi , Github: [Profile](https://github.com/JayShimpi22)
|
github_jupyter
|
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize, QuantileTransformer, Normalizer, PowerTransformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.svm import SVR
warnings.filterwarnings('ignore')
file_path= ""
features = []
target = ''
df=pd.read_csv(file_path)
df.head()
X=df[features]
Y=df[target]
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
nz = Normalizer()
x_train = nz.fit_transform(x_train)
x_test = nz.transform(x_test)
model = make_pipeline(PowerTransformer(),SVR())
model.fit(x_train,y_train)
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
| 0.369656 | 0.989368 |
```
from tensorflow_docs.vis import embed
from tensorflow import keras
from tensorflow.keras import backend as K
from imutils import paths
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import numpy as np
import imageio
import cv2
import os
from rl_with_videos.preprocessors.convnet import convnet_preprocessor
IMG_SIZE = 48
BATCH_SIZE = 64
EPOCHS = 10
MAX_SEQ_LENGTH = 20
NUM_FEATURES = 2048
TRAINING_FILE = "C:/nyu/DRL/final_project/dataset/UCF101/train.csv"
TESTING_FILE = "C:/nyu/DRL/final_project/dataset/UCF101/test.csv"
LABELS_CLASS = ['CricketShot', 'PlayingCello', 'Punch', 'ShavingBeard', 'TennisSwing']
NUM_CLASSES = 5
train_df = pd.read_csv(TRAINING_FILE)
test_df = pd.read_csv(TESTING_FILE)
print(f"Total videos for training: {len(train_df)}")
print(f"Total videos for testing: {len(test_df)}")
train_df.sample(10)
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)):
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = crop_center_square(frame)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
return np.array(frames)
feature_extractor = convnet_preprocessor([(6912,)], (48,48,3), 256)
feature_extractor.trainable = True
feature_extractor.summary()
class PositionalEmbedding(keras.layers.Layer):
def __init__(self, sequence_length, output_dim, **kwargs):
super().__init__(**kwargs)
self.position_embeddings = keras.layers.Embedding(
input_dim=sequence_length, output_dim=output_dim
)
self.sequence_length = sequence_length
self.output_dim = output_dim
def call(self, inputs):
# The inputs are of shape: `(batch_size, frames, num_features)`
length = tf.shape(inputs)[1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_positions = self.position_embeddings(positions)
return inputs + embedded_positions
# def compute_mask(self, inputs, mask=None):
# mask = tf.reduce_any(tf.cast(inputs, "bool"), axis=-1)
# return mask
class MultiHeadAttention(keras.layers.Layer):
def __init__(self,multiheads, head_dim,mask_right=False,**kwargs):
self.multiheads = multiheads
self.head_dim = head_dim
self.output_dim = multiheads * head_dim
self.mask_right = mask_right
super(MultiHeadAttention, self).__init__(**kwargs)
def compute_output_shape(self,input_shape):
return (input_shape[0][0],input_shape[0][1],self.output_dim) #shape=[batch_size,Q_sequence_length,self.multiheads*self.head_dim]
def build(self,input_shape):
self.WQ = self.add_weight(name='WQ',
shape=(input_shape[0][-1].value, self.output_dim),#input_shape[0] -> Q_seq
initializer='glorot_uniform',
trainable=True)
self.WK = self.add_weight(name='WK',
shape=(input_shape[1][-1].value, self.output_dim),#input_shape[1] -> K_seq
initializer='glorot_uniform',
trainable=True)
self.WV = self.add_weight(name='WV',
shape=(input_shape[2][-1].value, self.output_dim),#input_shape[2] -> V_seq
initializer='glorot_uniform',
trainable=True)
super(MultiHeadAttention, self).build(input_shape)
def Mask(self,inputs,seq_len,mode='add'):
if seq_len == None:
return inputs
else:
mask = K.one_hot(indices=seq_len[:,0],num_classes=K.shape(inputs)[1])#mask.shape=[batch_size,short_sequence_length],mask=[[0,0,0,0,1,0,0,..],[0,1,0,0,0,0,0...]...]
mask = 1 - K.cumsum(mask,axis=1)#mask.shape=[batch_size,short_sequence_length],mask=[[1,1,1,1,0,0,0,...],[1,0,0,0,0,0,0,...]...]
for _ in range(len(inputs.shape)-2):
mask = K.expand_dims(mask, 2)
if mode == 'mul':
return inputs * mask
elif mode == 'add':
return inputs - (1 - mask) * 1e12
def call(self,QKVs):
if len(QKVs) == 3:
Q_seq,K_seq,V_seq = QKVs
Q_len,V_len = None,None
elif len(QKVs) == 5:
Q_seq,K_seq,V_seq,Q_len,V_len = QKVs
Q_seq = K.dot(Q_seq,self.WQ)#Q_seq.shape=[batch_size,Q_sequence_length,self.output_dim]=[batch_size,Q_sequence_length,self.multiheads*self.head_dim]
Q_seq = K.reshape(Q_seq,shape=(-1,K.shape(Q_seq)[1],self.multiheads,self.head_dim))#Q_seq.shape=[batch_size,Q_sequence_length,self.multiheads,self.head_dim]
Q_seq = K.permute_dimensions(Q_seq,pattern=(0,2,1,3))#Q_seq.shape=[batch_size,self.multiheads,Q_sequence_length,self.head_dim]
K_seq = K.dot(K_seq,self.WK)
K_seq = K.reshape(K_seq,shape=(-1,K.shape(K_seq)[1],self.multiheads,self.head_dim))
K_seq = K.permute_dimensions(K_seq,pattern=(0,2,1,3))
V_seq = K.dot(V_seq,self.WV)
V_seq = K.reshape(V_seq,shape=(-1,K.shape(V_seq)[1],self.multiheads,self.head_dim))
V_seq = K.permute_dimensions(V_seq,pattern=(0,2,1,3))
A = K.batch_dot(Q_seq,K_seq,axes=[3,3])/K.sqrt(K.cast(self.head_dim,dtype='float32'))#A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
A = K.permute_dimensions(A,pattern=(0,3,2,1))#A.shape=[batch_size,K_sequence_length,Q_sequence_length,self.multiheads]
A = self.Mask(A,V_len,'add')
A = K.permute_dimensions(A,pattern=(0,3,2,1))#A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
if self.mask_right:
ones = K.ones_like(A[:1,:1])
lower_triangular = K.tf.matrix_band_part(ones,num_lower=-1,num_upper=0)
mask = (ones - lower_triangular) * 1e12
A = A - mask #Element-wise subtract,A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
A = K.softmax(A) #A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
#V_seq.shape=[batch_size,V_sequence_length,V_embedding_dim]
O_seq = K.batch_dot(A,V_seq,axes=[3,2])#O_seq.shape=[batch_size,self.multiheads,Q_sequence_length,V_sequence_length]
O_seq = K.permute_dimensions(O_seq,pattern=(0,2,1,3))#O_seq.shape=[batch_size,Q_sequence_length,self.multiheads,V_sequence_length]
O_seq = K.reshape(O_seq,shape=(-1,K.shape(O_seq)[1],self.output_dim))#O_seq.shape=[,Q_sequence_length,self.multiheads*self.head_dim]
O_seq = self.Mask(O_seq,Q_len,'mul')
return O_seq
class TransformerEncoder(keras.layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = MultiHeadAttention(
num_heads, embed_dim
)
self.dense_proj = keras.Sequential(
[keras.layers.Dense(dense_dim, activation='relu'), keras.layers.Dense(embed_dim),]
)
# self.layernorm_1 = keras.layers.LayerNormalization()
# self.layernorm_2 = keras.layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention([inputs, inputs, inputs])
proj_input = keras.layers.BatchNormalization()(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return keras.layers.BatchNormalization()(proj_input + proj_output)
def Transformer(
input_shapes,
output_size,
feature_extractor,
hidden_state_num = 2,
hidden_state_size = (16, 8),
*args,
**kwargs):
sequence_length = MAX_SEQ_LENGTH
embed_dim = 256
dense_dim = 4
num_heads = 1
video = keras.layers.Input(shape=input_shapes,name='video_input')
encoded_frame = keras.layers.TimeDistributed(keras.layers.Lambda(lambda x: feature_extractor(x)))(video)
x = PositionalEmbedding(
sequence_length, embed_dim, name="frame_position_embedding"
)(encoded_frame)
x = TransformerEncoder(embed_dim, dense_dim, num_heads, name="transformer_layer")(x)
x = keras.layers.GlobalMaxPooling1D()(x)
x = keras.layers.Dropout(0.5)(x)
# encoded_vid = keras.layers.Dense(8, activation='relu')(encoded_vid)
outputs = keras.layers.Dense(output_size, activation='softmax')(x)
model = keras.models.Model(inputs=[video],outputs=outputs)
return model
model = Transformer((None, 6912), 5, feature_extractor)
model.summary()
def label_processor(labels, labels_class):
new_labels = np.zeros(labels.shape)
for i in range(labels.shape[0]):
index = labels_class.index(labels[i])
new_labels[i] = index
return new_labels
def prepare_all_videos(df, root_dir):
num_samples = len(df)
video_paths = df["video_name"].values.tolist()
labels = df["tag"].values
labels = label_processor(labels, LABELS_CLASS)
labels = keras.utils.to_categorical(labels, NUM_CLASSES)
video_batch = np.zeros((num_samples, MAX_SEQ_LENGTH, 6912), dtype="float32")
# For each video.
for idx, path in enumerate(video_paths):
# Gather all its frames and add a batch dimension.
frames = load_video(os.path.join(root_dir, path))
frames = frames[None, ...]
# Extract features from the frames of the current video.
for i, batch in enumerate(frames):
video_length = batch.shape[0]
select_frame = np.linspace(0, video_length-1, MAX_SEQ_LENGTH,endpoint=True,retstep=True,dtype=int)[0]
# length = min(MAX_SEQ_LENGTH, video_length)
video_batch[idx] = batch[select_frame].reshape(20, 6912).astype('float32') / 255
return video_batch, labels
train_data, train_labels = prepare_all_videos(train_df, "C:/nyu/DRL/final_project/dataset/UCF101/train")
print(f"Frame features in train set: {train_data[0].shape}")
model.compile(
loss="categorical_crossentropy", optimizer=tf.keras.optimizers.Adam(), metrics=["accuracy"]
)
model.fit(train_data, train_labels, shuffle=True,
batch_size=50, epochs=30, validation_split=0.1,
verbose=1)
```
|
github_jupyter
|
from tensorflow_docs.vis import embed
from tensorflow import keras
from tensorflow.keras import backend as K
from imutils import paths
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
import numpy as np
import imageio
import cv2
import os
from rl_with_videos.preprocessors.convnet import convnet_preprocessor
IMG_SIZE = 48
BATCH_SIZE = 64
EPOCHS = 10
MAX_SEQ_LENGTH = 20
NUM_FEATURES = 2048
TRAINING_FILE = "C:/nyu/DRL/final_project/dataset/UCF101/train.csv"
TESTING_FILE = "C:/nyu/DRL/final_project/dataset/UCF101/test.csv"
LABELS_CLASS = ['CricketShot', 'PlayingCello', 'Punch', 'ShavingBeard', 'TennisSwing']
NUM_CLASSES = 5
train_df = pd.read_csv(TRAINING_FILE)
test_df = pd.read_csv(TESTING_FILE)
print(f"Total videos for training: {len(train_df)}")
print(f"Total videos for testing: {len(test_df)}")
train_df.sample(10)
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)):
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = crop_center_square(frame)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
return np.array(frames)
feature_extractor = convnet_preprocessor([(6912,)], (48,48,3), 256)
feature_extractor.trainable = True
feature_extractor.summary()
class PositionalEmbedding(keras.layers.Layer):
def __init__(self, sequence_length, output_dim, **kwargs):
super().__init__(**kwargs)
self.position_embeddings = keras.layers.Embedding(
input_dim=sequence_length, output_dim=output_dim
)
self.sequence_length = sequence_length
self.output_dim = output_dim
def call(self, inputs):
# The inputs are of shape: `(batch_size, frames, num_features)`
length = tf.shape(inputs)[1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_positions = self.position_embeddings(positions)
return inputs + embedded_positions
# def compute_mask(self, inputs, mask=None):
# mask = tf.reduce_any(tf.cast(inputs, "bool"), axis=-1)
# return mask
class MultiHeadAttention(keras.layers.Layer):
def __init__(self,multiheads, head_dim,mask_right=False,**kwargs):
self.multiheads = multiheads
self.head_dim = head_dim
self.output_dim = multiheads * head_dim
self.mask_right = mask_right
super(MultiHeadAttention, self).__init__(**kwargs)
def compute_output_shape(self,input_shape):
return (input_shape[0][0],input_shape[0][1],self.output_dim) #shape=[batch_size,Q_sequence_length,self.multiheads*self.head_dim]
def build(self,input_shape):
self.WQ = self.add_weight(name='WQ',
shape=(input_shape[0][-1].value, self.output_dim),#input_shape[0] -> Q_seq
initializer='glorot_uniform',
trainable=True)
self.WK = self.add_weight(name='WK',
shape=(input_shape[1][-1].value, self.output_dim),#input_shape[1] -> K_seq
initializer='glorot_uniform',
trainable=True)
self.WV = self.add_weight(name='WV',
shape=(input_shape[2][-1].value, self.output_dim),#input_shape[2] -> V_seq
initializer='glorot_uniform',
trainable=True)
super(MultiHeadAttention, self).build(input_shape)
def Mask(self,inputs,seq_len,mode='add'):
if seq_len == None:
return inputs
else:
mask = K.one_hot(indices=seq_len[:,0],num_classes=K.shape(inputs)[1])#mask.shape=[batch_size,short_sequence_length],mask=[[0,0,0,0,1,0,0,..],[0,1,0,0,0,0,0...]...]
mask = 1 - K.cumsum(mask,axis=1)#mask.shape=[batch_size,short_sequence_length],mask=[[1,1,1,1,0,0,0,...],[1,0,0,0,0,0,0,...]...]
for _ in range(len(inputs.shape)-2):
mask = K.expand_dims(mask, 2)
if mode == 'mul':
return inputs * mask
elif mode == 'add':
return inputs - (1 - mask) * 1e12
def call(self,QKVs):
if len(QKVs) == 3:
Q_seq,K_seq,V_seq = QKVs
Q_len,V_len = None,None
elif len(QKVs) == 5:
Q_seq,K_seq,V_seq,Q_len,V_len = QKVs
Q_seq = K.dot(Q_seq,self.WQ)#Q_seq.shape=[batch_size,Q_sequence_length,self.output_dim]=[batch_size,Q_sequence_length,self.multiheads*self.head_dim]
Q_seq = K.reshape(Q_seq,shape=(-1,K.shape(Q_seq)[1],self.multiheads,self.head_dim))#Q_seq.shape=[batch_size,Q_sequence_length,self.multiheads,self.head_dim]
Q_seq = K.permute_dimensions(Q_seq,pattern=(0,2,1,3))#Q_seq.shape=[batch_size,self.multiheads,Q_sequence_length,self.head_dim]
K_seq = K.dot(K_seq,self.WK)
K_seq = K.reshape(K_seq,shape=(-1,K.shape(K_seq)[1],self.multiheads,self.head_dim))
K_seq = K.permute_dimensions(K_seq,pattern=(0,2,1,3))
V_seq = K.dot(V_seq,self.WV)
V_seq = K.reshape(V_seq,shape=(-1,K.shape(V_seq)[1],self.multiheads,self.head_dim))
V_seq = K.permute_dimensions(V_seq,pattern=(0,2,1,3))
A = K.batch_dot(Q_seq,K_seq,axes=[3,3])/K.sqrt(K.cast(self.head_dim,dtype='float32'))#A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
A = K.permute_dimensions(A,pattern=(0,3,2,1))#A.shape=[batch_size,K_sequence_length,Q_sequence_length,self.multiheads]
A = self.Mask(A,V_len,'add')
A = K.permute_dimensions(A,pattern=(0,3,2,1))#A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
if self.mask_right:
ones = K.ones_like(A[:1,:1])
lower_triangular = K.tf.matrix_band_part(ones,num_lower=-1,num_upper=0)
mask = (ones - lower_triangular) * 1e12
A = A - mask #Element-wise subtract,A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
A = K.softmax(A) #A.shape=[batch_size,self.multiheads,Q_sequence_length,K_sequence_length]
#V_seq.shape=[batch_size,V_sequence_length,V_embedding_dim]
O_seq = K.batch_dot(A,V_seq,axes=[3,2])#O_seq.shape=[batch_size,self.multiheads,Q_sequence_length,V_sequence_length]
O_seq = K.permute_dimensions(O_seq,pattern=(0,2,1,3))#O_seq.shape=[batch_size,Q_sequence_length,self.multiheads,V_sequence_length]
O_seq = K.reshape(O_seq,shape=(-1,K.shape(O_seq)[1],self.output_dim))#O_seq.shape=[,Q_sequence_length,self.multiheads*self.head_dim]
O_seq = self.Mask(O_seq,Q_len,'mul')
return O_seq
class TransformerEncoder(keras.layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = MultiHeadAttention(
num_heads, embed_dim
)
self.dense_proj = keras.Sequential(
[keras.layers.Dense(dense_dim, activation='relu'), keras.layers.Dense(embed_dim),]
)
# self.layernorm_1 = keras.layers.LayerNormalization()
# self.layernorm_2 = keras.layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention([inputs, inputs, inputs])
proj_input = keras.layers.BatchNormalization()(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return keras.layers.BatchNormalization()(proj_input + proj_output)
def Transformer(
input_shapes,
output_size,
feature_extractor,
hidden_state_num = 2,
hidden_state_size = (16, 8),
*args,
**kwargs):
sequence_length = MAX_SEQ_LENGTH
embed_dim = 256
dense_dim = 4
num_heads = 1
video = keras.layers.Input(shape=input_shapes,name='video_input')
encoded_frame = keras.layers.TimeDistributed(keras.layers.Lambda(lambda x: feature_extractor(x)))(video)
x = PositionalEmbedding(
sequence_length, embed_dim, name="frame_position_embedding"
)(encoded_frame)
x = TransformerEncoder(embed_dim, dense_dim, num_heads, name="transformer_layer")(x)
x = keras.layers.GlobalMaxPooling1D()(x)
x = keras.layers.Dropout(0.5)(x)
# encoded_vid = keras.layers.Dense(8, activation='relu')(encoded_vid)
outputs = keras.layers.Dense(output_size, activation='softmax')(x)
model = keras.models.Model(inputs=[video],outputs=outputs)
return model
model = Transformer((None, 6912), 5, feature_extractor)
model.summary()
def label_processor(labels, labels_class):
new_labels = np.zeros(labels.shape)
for i in range(labels.shape[0]):
index = labels_class.index(labels[i])
new_labels[i] = index
return new_labels
def prepare_all_videos(df, root_dir):
num_samples = len(df)
video_paths = df["video_name"].values.tolist()
labels = df["tag"].values
labels = label_processor(labels, LABELS_CLASS)
labels = keras.utils.to_categorical(labels, NUM_CLASSES)
video_batch = np.zeros((num_samples, MAX_SEQ_LENGTH, 6912), dtype="float32")
# For each video.
for idx, path in enumerate(video_paths):
# Gather all its frames and add a batch dimension.
frames = load_video(os.path.join(root_dir, path))
frames = frames[None, ...]
# Extract features from the frames of the current video.
for i, batch in enumerate(frames):
video_length = batch.shape[0]
select_frame = np.linspace(0, video_length-1, MAX_SEQ_LENGTH,endpoint=True,retstep=True,dtype=int)[0]
# length = min(MAX_SEQ_LENGTH, video_length)
video_batch[idx] = batch[select_frame].reshape(20, 6912).astype('float32') / 255
return video_batch, labels
train_data, train_labels = prepare_all_videos(train_df, "C:/nyu/DRL/final_project/dataset/UCF101/train")
print(f"Frame features in train set: {train_data[0].shape}")
model.compile(
loss="categorical_crossentropy", optimizer=tf.keras.optimizers.Adam(), metrics=["accuracy"]
)
model.fit(train_data, train_labels, shuffle=True,
batch_size=50, epochs=30, validation_split=0.1,
verbose=1)
| 0.585575 | 0.341226 |
```
import numpy as np
import os
import pandas as pd
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('o', 's', '^', 'v', '<')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
lab = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
lab = lab.reshape(xx1.shape)
plt.contourf(xx1, xx2, lab, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class examples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=f'Class {cl}',
edgecolor='black')
class Perceptron:
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = float(0.)
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_ += update * xi
self.b_ += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, 0)
class AdalineGD:
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = float(0.)
self.losses_ = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w_ += self.eta * 2.0 * X.T.dot(errors) / X.shape[0]
self.b_ += self.eta * 2.0 * errors.mean()
loss = (errors ** 2).mean()
self.losses_.append(loss)
return self
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def activation(self, X):
return X
def predict(self, X):
return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
class AdalineSGD:
def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
self.eta = eta
self.n_iter = n_iter
self.w_initialized = False
self.shuffle = shuffle
self.random_state = random_state
def fit(self, X, y):
self._initialize_weights(X.shape[1])
self.losses_ = []
for i in range(self.n_iter):
if self.shuffle:
X, y = self._shuffle(X, y)
losses = []
for xi, target in zip(X, y):
losses.append(self._update_weights(xi, target))
avg_loss = np.mean(losses)
self.losses_.append(avg_loss)
return self
def partial_fit(self, X, y):
if not self.w_initialized:
self._initialize_weights(X.shape[1])
if y.ravel().shape[0] > 1:
for xi, target in zip(X, y):
self._update_weights(xi, target)
else:
self._update_weights(X, y)
return self
def _shuffle(self, X, y):
r = self.rgen.permutation(len(y))
return X[r], y[r]
def _initialize_weights(self, m):
self.rgen = np.random.RandomState(self.random_state)
self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=m)
self.b_ = float(0.)
self.w_initialized = True
def _update_weights(self, xi, target):
output = self.activation(self.net_input(xi))
error = (target - output)
self.w_ += self.eta * 2.0 * xi * (error)
self.b_ += self.eta * 2.0 * error
loss = error ** 2
return loss
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def activation(self, X):
return X
def predict(self, X):
return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
header=None,
encoding='utf-8')
df.head()
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', 0, 1)
X = df.iloc[0:100, [0, 2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='Setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='s', label='Versicolor')
plt.xlabel('Sepal length [cm]')
plt.ylabel('Petal length [cm]')
plt.legend(loc='upper left')
plt.show()
ppn = Perceptron(eta=0.01, n_iter=10)
ppn.fit(X, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of Updates')
plt.show()
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('Sepal length [cm]')
plt.ylabel('Petal length [cm]')
plt.legend(loc='upper left')
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10,4))
ada1 = AdalineGD(n_iter=15, eta=0.1).fit(X, y)
ax[0].plot(range(1, len(ada1.losses_) + 1), np.log10(ada1.losses_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Mean Squared Error)')
ax[0].set_title('Adaline - Learning Rate 0.1')
ada2 = AdalineGD(n_iter=15, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.losses_) + 1), ada2.losses_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Mean Squared Error)')
ax[1].set_title('Adaline - Learning Rate 0.0001')
plt.show()
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
X_std
ada_gd = AdalineGD(n_iter=20, eta=0.5)
ada_gd.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada_gd)
plt.tight_layout()
plt.show()
plt.plot(range(1, len(ada_gd.losses_) + 1), ada_gd.losses_, marker='o')
plt.tight_layout()
plt.show()
ada_sgd = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada_sgd.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada_sgd)
plt.legend(loc='upper left')
plt.show()
plt.plot(range(1, len(ada_sgd.losses_) + 1), ada_sgd.losses_, marker='o')
plt.tight_layout()
plt.show()
```
|
github_jupyter
|
import numpy as np
import os
import pandas as pd
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('o', 's', '^', 'v', '<')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
lab = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
lab = lab.reshape(xx1.shape)
plt.contourf(xx1, xx2, lab, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class examples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=f'Class {cl}',
edgecolor='black')
class Perceptron:
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = float(0.)
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_ += update * xi
self.b_ += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, 0)
class AdalineGD:
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = float(0.)
self.losses_ = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w_ += self.eta * 2.0 * X.T.dot(errors) / X.shape[0]
self.b_ += self.eta * 2.0 * errors.mean()
loss = (errors ** 2).mean()
self.losses_.append(loss)
return self
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def activation(self, X):
return X
def predict(self, X):
return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
class AdalineSGD:
def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
self.eta = eta
self.n_iter = n_iter
self.w_initialized = False
self.shuffle = shuffle
self.random_state = random_state
def fit(self, X, y):
self._initialize_weights(X.shape[1])
self.losses_ = []
for i in range(self.n_iter):
if self.shuffle:
X, y = self._shuffle(X, y)
losses = []
for xi, target in zip(X, y):
losses.append(self._update_weights(xi, target))
avg_loss = np.mean(losses)
self.losses_.append(avg_loss)
return self
def partial_fit(self, X, y):
if not self.w_initialized:
self._initialize_weights(X.shape[1])
if y.ravel().shape[0] > 1:
for xi, target in zip(X, y):
self._update_weights(xi, target)
else:
self._update_weights(X, y)
return self
def _shuffle(self, X, y):
r = self.rgen.permutation(len(y))
return X[r], y[r]
def _initialize_weights(self, m):
self.rgen = np.random.RandomState(self.random_state)
self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=m)
self.b_ = float(0.)
self.w_initialized = True
def _update_weights(self, xi, target):
output = self.activation(self.net_input(xi))
error = (target - output)
self.w_ += self.eta * 2.0 * xi * (error)
self.b_ += self.eta * 2.0 * error
loss = error ** 2
return loss
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def activation(self, X):
return X
def predict(self, X):
return np.where(self.activation(self.net_input(X)) >= 0.5, 1, 0)
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
header=None,
encoding='utf-8')
df.head()
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', 0, 1)
X = df.iloc[0:100, [0, 2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='Setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='s', label='Versicolor')
plt.xlabel('Sepal length [cm]')
plt.ylabel('Petal length [cm]')
plt.legend(loc='upper left')
plt.show()
ppn = Perceptron(eta=0.01, n_iter=10)
ppn.fit(X, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of Updates')
plt.show()
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('Sepal length [cm]')
plt.ylabel('Petal length [cm]')
plt.legend(loc='upper left')
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10,4))
ada1 = AdalineGD(n_iter=15, eta=0.1).fit(X, y)
ax[0].plot(range(1, len(ada1.losses_) + 1), np.log10(ada1.losses_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Mean Squared Error)')
ax[0].set_title('Adaline - Learning Rate 0.1')
ada2 = AdalineGD(n_iter=15, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.losses_) + 1), ada2.losses_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('log(Mean Squared Error)')
ax[1].set_title('Adaline - Learning Rate 0.0001')
plt.show()
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
X_std
ada_gd = AdalineGD(n_iter=20, eta=0.5)
ada_gd.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada_gd)
plt.tight_layout()
plt.show()
plt.plot(range(1, len(ada_gd.losses_) + 1), ada_gd.losses_, marker='o')
plt.tight_layout()
plt.show()
ada_sgd = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada_sgd.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada_sgd)
plt.legend(loc='upper left')
plt.show()
plt.plot(range(1, len(ada_sgd.losses_) + 1), ada_sgd.losses_, marker='o')
plt.tight_layout()
plt.show()
| 0.749546 | 0.495178 |
<a href="https://colab.research.google.com/github/preeti13456/pytorch/blob/master/tensor_operations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In this notebook we use the PyTorch library and torch.tensor() to work with inputs such as vectors, matrices, 3-D arrays and NumPy arrays, converting them to tensors and applying mathematical operations to them.
### Functions we can use on torch tensors.
A short introduction about PyTorch and the chosen functions.
- function 1 : math functions such as torch.rand(), torch.abs_() and torch.allclose()
- function 2 : torch.as_strided() and other layout functions
- function 3 : functions that operate on individual elements instead of whole blocks
- function 4 : functions that deal with the underlying storage, such as storage_offset()
- function 5 : torch.symeig() (eigenvalue functions)
```
# Import torch and other required modules
import torch
import os
import numpy as np
```
## Function 1 - Some of the math functions we used here to play with the tensor inputs.
# 1. tensor = torch.rand ((no.of rows, no. of columns)),
# 2. torch.abs_(input, alpha=1)
# 3. torch.allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False)
```
# Example 1 - working (change this)
#x = np.array([[1, 2], [3, 4.]])
#y = torch.from_numpy(x)
tensor=torch.rand((2,3))
z = tensor.new_tensor([[9,0,-7.],[5,7,.0]], requires_grad=False)
z.shape
z.permute([-2,1])
```
First we use rand() to fill a tensor of the given dimensions with random values. Then we create a new tensor z with tensor.new_tensor(), which keeps the dtype and device of tensor and, with requires_grad=False, does not track gradients. We can check the shape of z with .shape and reorder its dimensions with .permute.
```
# Example 2 - working
z.abs_()
z.add_(2,alpha=1)
```
Here abs_() makes every value of the tensor positive in place, and add_() adds the given number to every element (with alpha set to 1, so the value is added as-is).
```
# Example 3 - breaking (to illustrate when it breaks)
tensor = torch.tensor([[1, 2,-1.], [3, 4, 5]])
tensor2 = torch.exp(tensor).sum()
w = torch.rand(2,3)
w.allclose(tensor, rtol=1e-05, atol=1e-08, equal_nan=False)
w.argsort()
w.asin_()
```
In the allclose function the arguments are:
- input (Tensor) – first tensor to compare
- other (Tensor) – second tensor to compare
- atol (float, optional) – absolute tolerance. Default: 1e-08
- rtol (float, optional) – relative tolerance. Default: 1e-05
- equal_nan (bool, optional) – if True, two NaNs are considered equal. Default: False
argsort returns the indices that would sort the tensor's elements.
asin_ computes the inverse sine element-wise, in place. It expects inputs in the range [-1, 1] and returns the result in radians; values outside that range produce nan. If the input contains more than one element, the inverse sine is computed element-wise.
## Function 2 - 1. torch.as_strided, 2. torch.bincount, 3. torch.diag_embed
1. torch.as_strided creates a view of an existing torch.Tensor input with the specified size, stride and storage_offset.
2. torch.bincount counts how many times each non-negative integer value appears in the input tensor; an optional weights tensor of the same length adds that element's weight to its bin instead of 1, and minlength sets the minimum number of bins in the result.
3. torch.diag_embed places the input values along a diagonal of the returned tensor.
```
# Example 1 - working
w.as_strided((3,1),(2,2), storage_offset=0)
t = torch.tensor([[9,0,0],[-9,-1.,-4],[0,-9,-8]])
#w.baddbmm_(t,t, beta=1, alpha=1)
w.bernoulli_(p=0.5, generator=None)
```
Here bernoulli_() fills each location of the tensor in place with an independent sample from a Bernoulli(p) distribution; the tensor can have an integral dtype.
```
# Example 2 - working
torch.bincount(weights=torch.tensor([2]),input=torch.tensor([11]) ,minlength=1)
```
Here bincount() counts how often each value occurs in the input tensor (which must contain non-negative integers). The optional weights tensor must have the same length as the input; when it is given, each element contributes its weight to its bin instead of 1. minlength guarantees a minimum number of bins in the output. See the short sketch below.
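A minimal sketch of that behaviour, with arbitrarily chosen values:
```
import torch

x = torch.tensor([0, 1, 1, 3])
print(torch.bincount(x))                      # tensor([1, 2, 0, 1]) -> counts for the values 0, 1, 2, 3
w = torch.tensor([0.5, 1.0, 2.0, 0.25])
print(torch.bincount(x, weights=w))           # tensor([0.5000, 3.0000, 0.0000, 0.2500])
print(torch.bincount(x, minlength=6))         # at least 6 bins: tensor([1, 2, 0, 1, 0, 0])
```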
```
# Example 3 - breaking (to illustrate when it breaks)
#torch.cholesky_solve(input=torch.tensor([[5,6],[8,9]]),input2=torch.tensor([[3,8],[1,8]]),upper=False)
torch.diag_embed(t,offset=0, dim1=-2, dim2=-1)
```
torch.diag_embed creates a tensor whose diagonals of certain 2-D planes (selected by dim1 and dim2) are filled by the input. By default the 2-D planes are formed by the last two dimensions of the returned tensor.
The offset argument controls which diagonal is filled:
- offset = 0: the main diagonal,
- offset > 0: above the main diagonal,
- offset < 0: below the main diagonal.
The size of the new matrix is chosen so that the specified diagonal has the size of the last input dimension. Note that for offsets other than 0 the order of dim1 and dim2 matters: swapping them is equivalent to changing the sign of offset.
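A small sketch of how offset shifts the filled diagonal (the vector below is just an example):
```
import torch

v = torch.tensor([1., 2., 3.])
print(torch.diag_embed(v))             # 3x3 matrix with [1, 2, 3] on the main diagonal
print(torch.diag_embed(v, offset=1))   # 4x4 matrix, values one step above the main diagonal
print(torch.diag_embed(v, offset=-1))  # 4x4 matrix, values one step below the main diagonal
```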
## Function 3 - 1. torch.erfinv, 2. torch.split(), 3. torch.sparse_mask
erfinv computes the inverse error function of each input element.
As the name suggests, .split() splits the tensor into chunks and returns them.
sparse_mask filters a dense tensor by the indices of a sparse mask and returns a new sparse tensor.
```
# Example 1 - working
torch.erfinv(t)
```
torch.erfinv(input, out=None) → Tensor
Computes the inverse error function of each element of input. The inverse error function is defined on the range (-1, 1).
```
# Example 2 - working
#torch.index_fill(t,w,2,dim=1)
#torch.scatter_add(dim=1, index=t, src=w)
torch.split(t,split_size_or_sections=2,dim=0)
```
Splits the tensor into chunks. Each chunk is a view of the original tensor.
If split_size_or_sections is an integer, the tensor is split into equally sized chunks (when possible); the last chunk will be smaller if the tensor size along the given dimension is not divisible by split_size.
If split_size_or_sections is a list, the tensor is split into len(split_size_or_sections) chunks whose sizes along dim are given by split_size_or_sections.
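A minimal sketch of both forms of split_size_or_sections, using a toy tensor:
```
import torch

a = torch.arange(10)
print(torch.split(a, 4))         # chunks of 4: (0..3), (4..7) and a smaller final chunk (8, 9)
print(torch.split(a, [3, 7]))    # explicit sizes: the first 3 elements, then the remaining 7
```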
```
# Example 3 - breaking (to illustrate when it breaks)
nnz = 5
dims = [5, 5,2,2]
I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
V = torch.randn(nnz, dims[2], dims[3])
size = torch.Size(dims)
S = torch.sparse_coo_tensor(I,V,size).coalesce()
D = torch.randn(dims)
D.sparse_mask(S)
```
Returns a new SparseTensor whose values are taken from the dense tensor input, filtered by the indices of mask; values outside those indices are ignored. input and mask must have the same shape.
Parameters
- input (Tensor) – the input dense tensor
- mask (SparseTensor) – the SparseTensor whose indices are used to filter input
## Function 4 -
1. storage_offset() : returns the offset of the tensor inside its underlying storage (useful for sub-tensors and views).
2. stride() : returns how many stored elements must be skipped to move one step along each dimension.
3. sum() : sums the elements of the tensor.
(A short illustrative sketch of the first two follows.)
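Here is a small sketch (on a toy tensor) of how stride() and storage_offset() describe a view into the underlying storage:
```
import torch

m = torch.arange(12).reshape(3, 4)
print(m.stride())              # (4, 1): one row ahead skips 4 stored elements, one column skips 1
print(m.storage_offset())      # 0: m starts at the beginning of its storage

sub = m[1:, 2:]                # a view into the same storage
print(sub.stride())            # still (4, 1) -- the layout of the parent tensor is kept
print(sub.storage_offset())    # 6 = 1*4 + 2: where the view starts inside the storage
print(sub.sum())               # tensor(34) = 6 + 7 + 10 + 11
```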
```
# Example 1 - working
#torch.stft(n_fft=t,hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True)
t.storage_offset()
```
Since t is a freshly created tensor that starts at the beginning of its storage, its storage offset is 0.
```
# Example 2 - working
t.stride()
```
stride() tells how far you have to jump in the underlying storage to move to the next element along each dimension. Called with a dimension index it returns the stride of that dimension; called without arguments it returns the strides of all dimensions as a tuple.
```
# Example 3 - breaking (to illustrate when it breaks)
torch.sum(t,dtype=None)
```
Here sum() adds up all the elements of the tensor. Since every element of t is a float, the result is a single float tensor; dtype=None simply keeps the default output dtype.
## Function 5 - Functions computing eigenvalues and eigenvectors
By default only the upper triangular part of the (assumed symmetric) input matrix is used.
```
# Example 1 - working
torch.symeig(t,eigenvectors=False, upper=True)
```
This function uses only one triangle of a symmetric input matrix to compute its eigenvalues and eigenvectors. It returns the eigenvalues and eigenvectors of a real symmetric matrix input (or a batch of such matrices), represented by a namedtuple (eigenvalues, eigenvectors).
It computes all eigenvalues (and optionally eigenvectors) of input such that $\text{input} = V \,\text{diag}(e)\, V^T$.
The boolean argument eigenvectors controls whether both eigenvectors and eigenvalues are computed, or only eigenvalues:
if it is False, only eigenvalues are computed; if it is True, both eigenvalues and eigenvectors are computed.
Since the input matrix is expected to be symmetric, only the upper triangular portion is used by default.
If upper is False, the lower triangular portion is used instead.
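As a small check of the decomposition above, the sketch below builds a symmetric matrix and verifies input = V diag(e) Vᵀ. Note that torch.symeig has been deprecated (and later removed) in favour of torch.linalg.eigh, which is used here; on older versions, torch.symeig(s, eigenvectors=True) returns the same factors.
```
import torch

# A symmetric toy matrix (these routines only read one triangle anyway)
s = torch.tensor([[2., 1., 0.],
                  [1., 3., 1.],
                  [0., 1., 2.]])

e, v = torch.linalg.eigh(s)            # eigenvalues e, eigenvectors as columns of v

# Verify the factorization s = V diag(e) V^T
reconstructed = v @ torch.diag(e) @ v.T
print(torch.allclose(reconstructed, s, atol=1e-6))   # True
```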
```
# Example 2 - working
torch.symeig(t, eigenvectors=True)
```
It is the same computation as above, but since eigenvectors=True it returns both the eigenvalues and the eigenvectors.
```
# Example 3 - breaking (to illustrate when it breaks
torch.rand(2)
```
torch.rand(2) returns a 1-D tensor with two random values drawn uniformly from [0, 1).
## Conclusion
We covered a small selection of torch and torch.Tensor functions: math operations, layout and view functions, element-wise functions, storage helpers and eigenvalue routines.
## Reference Links
* Official documentation for `torch.Tensor`: https://pytorch.org/docs/stable/tensors.html
* ...
```
!pip install jovian --upgrade --quiet
import jovian
jovian.commit()
```
|
github_jupyter
|
# Import torch and other required modules
import torch
import os
import numpy as np
# Example 1 - working (change this)
#x = np.array([[1, 2], [3, 4.]])
#y = torch.from_numpy(x)
tensor=torch.rand((2,3))
z = tensor.new_tensor([[9,0,-7.],[5,7,.0]], requires_grad=False)
z.shape
z.permute([-2,1])
# Example 2 - working
z.abs_()
z.add_(2,alpha=1)
# Example 3 - breaking (to illustrate when it breaks)
tensor = torch.tensor([[1, 2,-1.], [3, 4, 5]])
tensor2 = torch.exp(tensor).sum()
w = torch.rand(2,3)
w.allclose(tensor, rtol=1e-05, atol=1e-08, equal_nan=False)
w.argsort()
w.asin_()
# Example 1 - working
w.as_strided((3,1),(2,2), storage_offset=0)
t = torch.tensor([[9,0,0],[-9,-1.,-4],[0,-9,-8]])
#w.baddbmm_(t,t, beta=1, alpha=1)
w.bernoulli_(p=0.5, generator=None)
# Example 2 - working
torch.bincount(weights=torch.tensor([2]),input=torch.tensor([11]) ,minlength=1)
# Example 3 - breaking (to illustrate when it breaks)
#torch.cholesky_solve(input=torch.tensor([[5,6],[8,9]]),input2=torch.tensor([[3,8],[1,8]]),upper=False)
torch.diag_embed(t,offset=0, dim1=-2, dim2=-1)
# Example 1 - working
torch.erfinv(t)
# Example 2 - working
#torch.index_fill(t,w,2,dim=1)
#torch.scatter_add(dim=1, index=t, src=w)
torch.split(t,split_size_or_sections=2,dim=0)
# Example 3 - breaking (to illustrate when it breaks)
nnz = 5
dims = [5, 5,2,2]
I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
V = torch.randn(nnz, dims[2], dims[3])
size = torch.Size(dims)
S = torch.sparse_coo_tensor(I,V,size).coalesce()
D = torch.randn(dims)
D.sparse_mask(S)
# Example 1 - working
#torch.stft(n_fft=t,hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True)
t.storage_offset()
# Example 2 - working
t.stride()
# Example 3 - breaking (to illustrate when it breaks)
torch.sum(t,dtype=None)
# Example 1 - working
torch.symeig(t,eigenvectors=False, upper=True)
# Example 2 - working
torch.symeig(t, eigenvectors=True)
# Example 3 - breaking (to illustrate when it breaks
torch.rand(2)
!pip install jovian --upgrade --quiet
import jovian
jovian.commit()
| 0.721351 | 0.98984 |
# RNN on Time series dataset
On this Notebook we will build a recurrent model on a multi-variate time series dataset.
## The data
The dataset is a multi-variate time series. Let's do a very short EDA on it:
```
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# load data
def parse(x):
return datetime.strptime(x, '%Y %m %d %H')
dataset_path = '/home/fer/data/formaciones/master/deep-learning-intro/datasets/time_series/pollution.csv'
dataset = pd.read_csv(dataset_path, parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
dataset.drop('No', axis=1, inplace=True)
values = dataset.values
# specify columns to plot
groups = [0, 1, 2, 3, 5, 6, 7]
i = 1
# plot each column
plt.figure(figsize=(10,10))
for group in groups:
plt.subplot(len(groups), 1, i)
plt.plot(values[:, group])
plt.title(dataset.columns[group], y=0.5, loc='right')
i += 1
plt.show()
# manually specify column names
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
dataset.index.name = 'date'
# mark all NA values with 0
dataset['pollution'].fillna(0, inplace=True)
# drop the first 24 hours
dataset = dataset[24:]
# summarize first 5 rows
dataset.head()
```
This is what we have. It would be worth spending more time reviewing the data, but here we will focus on the RNN application. Let's first get rid of the categorical feature by using a label encoder (certainly not the best solution) and scale the now continuous features.
<font color=red><b>Scale the dataset
<br>Hint: use the minmaxscaler function </b>
</font>
```
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
values = dataset.values
# integer encode direction
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
values = values.astype('float32')
...
```
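One possible way to complete the scaling step, following the MinMaxScaler hint; the later cells expect the fitted scaler and the scaled array under exactly these names:
```
# Possible completion of the scaling step. `values` comes from the cell above and the
# later cells expect the fitted `scaler` and the `scaled` array under these names.
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
print(scaled.shape, scaled.min(), scaled.max())
```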
The next function converts our series data into a supervised learning one, by shifting it and adding timesteps to each row:
```
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
reframed.head()
```
This way, in our example, we have the feature values for the current timestep and the previous. The idea is to predict the next from the current. So we are interested in predicting $var1(t)$ from $var1(t-1), var2(t-1), \cdots, var8(t-1)$. Let's drop the useless features:
```
# drop columns we don't want to predict
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
reframed.head()
```
Let's now divide it into training and testing sets. To do that, we can leave the last year as testing.
<font color=red><b>Split the data
</font>
```
# split into train and test sets
values = reframed.values
...
train = ...
test = ...
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
```
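One possible way to fill in the split above, keeping the idea of reserving the last year (365 × 24 hourly rows) for testing; the reshaping lines that follow it stay as written:
```
# Possible completion: hold out the last year (365 * 24 hourly rows) for testing.
n_test_hours = 365 * 24
train = values[:-n_test_hours]
test = values[-n_test_hours:]
```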
Please note the dataset shape: (N_samples, timesteps, n_features).
## Model Architecture:
We will build now an LSTM network in order to predict the target.
- Use a LSTM layer, with 50 units.
- Use a dense layer with a single feature: the one we are trying to predict.
- Compile it using mae as the loss and adam as the optimizer.
- train it using the test data as validation, not shuffling the data (why?) and using 10 epochs and a batch size of 72.
- Store the fitting inside a history variable, for plotting purposes.
<font color=red><b>Build the model
</font>
```
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
# design network
...
# fit network
history = ...
```
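One possible network matching the description above; it relies on the train_X/train_y/test_X/test_y arrays built earlier, and the sizes follow the bullet list (50 LSTM units, a single-unit Dense output, MAE loss, Adam optimizer):
```
# Possible completion of the network described above.
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

# Fit on the training split, validate on the test split, keep the time order (shuffle=False).
history = model.fit(train_X, train_y, epochs=10, batch_size=72,
                    validation_data=(test_X, test_y),
                    shuffle=False, verbose=2)
```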
Now let's plot the loss. We only have a few epochs, but we can still see the progress:
```
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
```
Finally, let's see how well we did. Keep in mind that the model predicts at the scaled level, so we need to invert the scaling:
```
import numpy as np
# make a prediction
yhat = model.predict(test_X)
x_test = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast
inv_yhat = np.concatenate((yhat, x_test[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
y_test = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((y_test, x_test[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
```
<font color=red><b>Compute the rmse and mae at test level
</font>
```
from sklearn.metrics import ...
from math import sqrt
# calculate RMSE
rmse = ...
mae = ...
print('Test RMSE: %.3f' % rmse)
print('Test MAE: %.3f' % mae)
```
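One possible completion of the evaluation cell above, using the scikit-learn metric functions:
```
# Possible completion of the evaluation cell above.
from sklearn.metrics import mean_squared_error, mean_absolute_error
from math import sqrt

rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
mae = mean_absolute_error(inv_y, inv_yhat)
print('Test RMSE: %.3f' % rmse)
print('Test MAE: %.3f' % mae)
```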
And finally, let's plot the first 100 results:
```
plt.figure(figsize=(10,10))
plt.plot(inv_y[0:100], marker='.', label="true")
plt.plot(inv_yhat[0:100], 'r', label="prediction")
plt.ylabel('Value')
plt.xlabel('Time Step')
plt.legend()
plt.show()
```
|
github_jupyter
|
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
# load data
def parse(x):
return datetime.strptime(x, '%Y %m %d %H')
dataset_path = '/home/fer/data/formaciones/master/deep-learning-intro/datasets/time_series/pollution.csv'
dataset = pd.read_csv(dataset_path, parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
dataset.drop('No', axis=1, inplace=True)
values = dataset.values
# specify columns to plot
groups = [0, 1, 2, 3, 5, 6, 7]
i = 1
# plot each column
plt.figure(figsize=(10,10))
for group in groups:
plt.subplot(len(groups), 1, i)
plt.plot(values[:, group])
plt.title(dataset.columns[group], y=0.5, loc='right')
i += 1
plt.show()
# manually specify column names
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
dataset.index.name = 'date'
# mark all NA values with 0
dataset['pollution'].fillna(0, inplace=True)
# drop the first 24 hours
dataset = dataset[24:]
# summarize first 5 rows
dataset.head()
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
values = dataset.values
# integer encode direction
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
values = values.astype('float32')
...
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
reframed.head()
# drop columns we don't want to predict
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
reframed.head()
# split into train and test sets
values = reframed.values
...
train = ...
test = ...
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
# design network
...
# fit network
history = ...
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
import numpy as np
# make a prediction
yhat = model.predict(test_X)
x_test = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast
inv_yhat = np.concatenate((yhat, x_test[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
y_test = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((y_test, x_test[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
from sklearn.metrics import ...
from math import sqrt
# calculate RMSE
rmse = ...
mae = ...
print('Test RMSE: %.3f' % rmse)
print('Test MAE: %.3f' % mae)
plt.figure(figsize=(10,10))
plt.plot(inv_y[0:100], marker='.', label="true")
plt.plot(inv_yhat[0:100], 'r', label="prediction")
plt.ylabel('Value')
plt.xlabel('Time Step')
plt.legend()
plt.show()
| 0.659734 | 0.964556 |
# Model Test-Train Performance Evaluation
### This notebook contains figures 12, 13 and 14 from section IV of the paper
```
# only need this line in jupyter
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# draw bar graph of test,train scores(accuracy, precision, recall, f1 score, auc) for each classifier
def draw_barGraph(train_scores, test_scores, score_name, classifiers, graph_label):
labels = classifiers
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
ax.bar(x - width/2, train_scores, width, label='train')
ax.bar(x + width/2, test_scores, width, label='test')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel(score_name)
ax.set_title(graph_label)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(loc='lower right')
plt.xlim([-1, 4])
plt.show()
classifiers = ['Decision Tree', 'KNN', 'LinearSVM', 'RandomForest']
```
## The test and train scores below were taken from each classifier's corresponding notebook.
```
# train accuracy
decisionTree_train_accuracy = 0.965
knn_train_accuracy = 0.959
linearSVM_train_accuracy = 0.943
randomForest_train_accuracy = 0.963
# test accuracy
decisionTree_test_accuracy = 0.961
knn_test_accuracy = 0.958
linearSVM_test_accuracy = 0.946
randomForest_test_accuracy = 0.963
train_accuracies = [decisionTree_train_accuracy, knn_train_accuracy,
linearSVM_train_accuracy, randomForest_train_accuracy]
test_accuracies = [decisionTree_test_accuracy, knn_test_accuracy,
linearSVM_test_accuracy, randomForest_test_accuracy]
# train f1
decisionTree_train_f1 = 0.951
knn_train_f1 = 0.942
linearSVM_train_f1 = 0.92
randomForest_train_f1 = 0.951
# test f1
decisionTree_test_f1 = 0.945
knn_test_f1 = 0.94
linearSVM_test_f1 = 0.924
randomForest_test_f1 = 0.947
train_f1s = [decisionTree_train_f1, knn_train_f1, linearSVM_train_f1, randomForest_train_f1]
test_f1s = [decisionTree_test_f1, knn_test_f1, linearSVM_test_f1, randomForest_test_f1]
# train auc
decisionTree_train_auc = 0.994
knn_train_auc = 0.985
linearSVM_train_auc = 0.985
randomForest_train_auc = 0.994
# test auc
decisionTree_test_auc = 0.985
knn_test_auc = 0.982
linearSVM_test_auc = 0.984
randomForest_test_auc = 0.993
train_aucs = [decisionTree_train_auc, knn_train_auc, linearSVM_train_auc, randomForest_train_auc]
test_aucs = [decisionTree_test_auc, knn_test_auc, linearSVM_test_auc, randomForest_test_auc]
```
## Accuracy
```
draw_barGraph(
train_scores = train_accuracies, test_scores = test_accuracies,
score_name = 'accuracy', classifiers = classifiers, graph_label = "train-test accuracies"
)
```
## F1-score
```
draw_barGraph(
train_scores = train_f1s, test_scores = test_f1s,
score_name = 'f1-score', classifiers = classifiers, graph_label = "train-test f1-scores"
)
```
## AUC
```
draw_barGraph(
train_scores = train_aucs, test_scores = test_aucs,
score_name = 'AUC', classifiers = classifiers, graph_label = "train-test AUCs"
)
```
Using the [seaborn](http://python-graph-gallery.com/40-basic-scatterplot-seaborn/) library, you can plot a basic scatterplot with color encoding for different subsets of data. In the following examples, the iris dataset from the seaborn repository is used. Using the `hue` argument, it is possible to define groups in your data by different colors or shapes.
## Map a color per group
This example uses the `lmplot()` function of the seaborn library. In order to draw each species with a different color, the species column of the dataset is passed to the `hue` argument. The list of arguments needed for the function is:
* `x` : positions of points on the X axis
* `y` : positions of points on the Y axis
* `data` : dataset
* `fit_reg` : if True, show the linear regression fit line
* `hue` : variables that define subsets of the data
* `legend` : if True, add a legend
Note that the legend is specified through matplotlib rather than seaborn itself. To set a specific location for the legend, `plt.legend()` can be used.
```
# library & dataset
import seaborn as sns
import matplotlib.pyplot as plt
df = sns.load_dataset('iris')
# Use the 'hue' argument to provide a factor variable
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False)
# Move the legend to an empty part of the plot
plt.legend(loc='lower right')
plt.show()
```
## Map a marker per group
It is also possible to define categories with different marker shapes. You can do it by passing the `markers` argument to the function:
* `markers` : a list of marker shapes
```
# library & dataset
import seaborn as sns
import matplotlib.pyplot as plt
df = sns.load_dataset('iris')
# give a list to the marker argument
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False, markers=["o", "x", "1"])
# Move the legend to an empty part of the plot
plt.legend(loc='lower right')
plt.show()
```
## Use another palette
Instead of using the default color palette, you can specify your palette of choice with the `palette` parameter. Many palettes are available in seaborn, including deep, muted, bright, pastel, dark, and colorblind.
```
# library & dataset
import seaborn as sns
import matplotlib.pyplot as plt
df = sns.load_dataset('iris')
# Use the 'palette' argument
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False, palette="Set2")
# Move the legend to an empty part of the plot
plt.legend(loc='lower right')
plt.show()
```
## Control color of each group
An alternative way to control the colors of dataset groups in a seaborn scatterplot is to create a dictionary mapping hue levels to matplotlib colors.
```
# library & dataset
import seaborn as sns
import matplotlib.pyplot as plt
df = sns.load_dataset('iris')
# Provide a dictionary to the palette argument
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False, palette=dict(setosa="#9b59b6", virginica="#3498db", versicolor="#95a5a6"))
# Move the legend to an empty part of the plot
plt.legend(loc='lower right')
plt.show()
```
# Main features of the program and more important functions implemented
The program is useful for the *post-processing* of *ab initio* results concerning *static* energies and vibrational frequencies as functions of the unit cell volume of a crystal. Within the limit of the *quasi-harmonic* approximation (**QHA**) and within the framework of statistical thermodynamics, the Helmholtz free energy ($F$) is computed as a function of the volume of the unit cell and of the temperature; by knowing $F$ and by means of classical thermodynamics relations, a number of properties are then evaluated. These include:
### Elastic properties and thermal expansion
1. Equation of state (bulk modulus, its pressure derivative and its temperature dependence, equilibrium volume) at the *static* condition and in a range of temperatures. Currently, the 3^rd and 4^th order Birch-Murnaghan *P(V)* equations are implemented (BM3 and BM4 respectively);
2. Thermal expansion as a function of temperature and pressure.
### Thermodynamics properties
3. Specific heat at constant volume and at constant pressure;
4. Entropy;
5. Helmholtz free energy;
6. Gibbs free energy;
7. Phase transitions and mineral reactions: equilibrium curves in the P/T space. Currently the program does not consider solid solutions; only end-member mineral phases can be considered.
### Frequencies analysis
8. Analysis of the variations of vibrational frequencies with pressure, volume or temperature of the crystal.
### Phonon dispersion
Concerning *dispersion* relations in the reciprocal space, and their impact on the calculated properties, unless super-cell computations of the frequencies are available, the modified Kieffer model (see [this paper](https://pubs.geoscienceworld.org/msa/ammin/article-abstract/99/7/1449/46740/ab-initio-thermodynamic-and-thermophysical?redirectedFrom=fulltext) and the references therein) can be used to evaluate the contribution to the thermodynamics properties of the acoustic vibrational modes.
In case supercell calculations are available, the frequencies of the off-center vibrational modes, together with their volume dependence, can be taken into account. See the tutorial [here](https://qm-thermodynamics.readthedocs.io/en/main/_static/Dispersion.html).
### LO-TO splitting
The impact of the LO-TO splitting on properties can also be evaluated. See the tutorial [here](https://qm-thermodynamics.readthedocs.io/en/main/_static/LO_TO_splitting.html).
### Anharmonicity
The program *anharm.py* of the package can be used to take into account anharmonic effects on selected vibrational modes, provided that *scans* of the anharmonic potential as functions of the corresponding normal coordinates of the modes are available, at different values of the unit cell volume. See the relevant tutorial [here](https://qm-thermodynamics.readthedocs.io/en/main/_static/anharm.html).
The required data (scans of the anharmonic potential) can be obtained by using the [CRYSTAL17](https://www.crystal.unito.it/index.php) code by means of the SCANMODE keyword, after a standard frequency calculation has been performed.
Information concerning the required input files can be found in the [Basic EoS Tutorial](https://qm-thermodynamics.readthedocs.io/en/main/_static/basic_eos_tutorial.html).
The examples in the documentation below are executed with reference to the case of pyrope. The program is launched and the input files are loaded. The *input.txt* file is shown as soon as the program starts (and a static BM3 EoS is computed).
```
%run bm3_thermal_2.py
```
## Dealing with the frequencies
In the calculation of the equation of state it is possible to choose among several options; the first two choices are:
1. the optimization of a *volume-integrated* EoS (V-BM3, or V-BM4) on the *F(V)* values computed at a given temperature
2. the optimization of the EoS (BM3, or BM4) on the *P(V)* values computed at a given temperature.
Concerning the choice (1), there are three possibilities:
- **No fit of frequencies as a function of the cell volume**: this option can be used when a relatively large set of frequency/volume data is available, and in cases for which the *numerical noise* possibly affecting the *ab initio* computation of the frequencies, as they change with the cell volume, is small; otherwise fits can be used:
- **Polynomial fits** of frequencies as a function of volume, or
- **Spline fits** of frequencies as a function of volume
In the current example, a polynomial fit is requested:
~~~~~~~~~~
FITVOL
POLY
725. 769. 16 3
~~~~~~~~~~
The *FITVOL* keyword activates the frequency fit; the *POLY* keyword below specifies a polynomial fit. The first two float numbers following the *POLY* keyword are the minimum and maximum volumes for the fit; the integer 16 is the number of points, in the volume range, at which the frequencies will be obtained (from the fit) in order to calculate the Helmholtz energy, and the last integer (3) is the degree of the fitting polynomial; the EoS will then be optimized on this set of 16 *F(V)* points.
The command for the EoS optimization is ***eos_temp(tt)***, where the argument ***tt*** is the temperature:
```
eos_temp(300, prt=False)
```
The other possible choice for the frequency fitting is the *spline* fit. This can be required in *input.txt* with:
~~~~~~~~~~~~~
FITVOL
SPLINE
725. 769. 16 3 1.
~~~~~~~~~~~~~
The last float number is the *smoothness* parameter. Information about the spline fit can be found [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.html#scipy.interpolate.UnivariateSpline); look at the documentation concerning the <i>s</i> parameter for the meaning of *smoothness*.
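As a generic illustration of how the degree and smoothness parameters act (this is plain SciPy, not a command of the program, and the data below are made up):
```
import numpy as np
from scipy.interpolate import UnivariateSpline

# made-up noisy frequency/volume data, for illustration only
volume = np.linspace(725., 769., 12)
freq = 900. - 0.5*volume + np.random.normal(scale=0.2, size=volume.size)

# k is the spline degree, s the smoothness: a larger s gives a smoother (less wiggly) fit
spl = UnivariateSpline(volume, freq, k=3, s=1.)
print(spl(740.))   # frequency interpolated at an intermediate volume
```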
The polynomial or spline fittings can be requested at any time, overriding the choice made in *input.txt* by the commands
set_poly(deg, npoint)
set_spline(deg, smooth, npoint)
default values for the parameters are:
* POLY fit: deg=4, npoint=16
* SPLINE fit: deg=3, smooth=5., npoint=16
As an example:
```
set_spline(3, 10.)
```
**Note:** The volume range originally requested in *input.txt* was $[725., 769. A^3]$; by requesting the spline fit with the command above, the volume range changed. This behaviour is due to the fact that the volume ranges for the different types of fitting are allowed to be different: as no volume range was specified for the spline fitting, the default choice is to take the minimum and maximum volumes at which the frequencies have been computed, with an applied offset of 0.2 A^3. Such volumes can be visualized by the command ***info.show***:
```
info.show()
```
To change the volume range, the command ***set_volume_range*** can be issued; as we currently are in the spline fitting mode, the command will act on the volume range for the spline fit:
```
set_volume_range(725., 769., prt=True)
```
Concerning the command above, the optional parameter *npoint* (default=16) is available; the default value for *prt* is *False*: the volume range is changed without any printed notification.
The command **eos_temp** will now output the equation of state (BM3) by using a spline fit of the frequencies:
```
eos_temp(300,prt=False)
```
To *deactivate* any fitting option (POLY or SPLINE of whatever degree), the *FITVOL* keyword can be removed from the *input* file, or the command ***fit_off*** can be issued:
```
fit_off()
```
To see the effect on the equation of state:
```
eos_temp(300, prt=False)
```
As written above (**option 2**), the EoS can also be optimized by fitting a BM3 (or BM4) function on a *P(V)* set of data. In this case, pressures at each volume are computed as the partial derivative of <i>F</i> with respect to <i>V</i>, at constant temperature:
$$P=-\left(\frac{\partial F}{\partial V}\right)_{\!T}$$
To this end, some kind of fitting of the frequencies is **mandatory**. In fact, since we have switched off the fitting, requesting the *P(V)* EoS with the ***bulk_dir*** command gives:
```
bulk_dir(300)
```
where the argument "300" is the temperature. We have to switch on the frequency fitting, and then we can do the EoS optimization; for instance:
```
set_poly(3)
bulk_dir(300)
```
A number of commands are available to show the behaviour of frequencies with the volume and the quality of fits. For instance, the command ***check_poly_list*** will make the analysis on a specified set of modes:
```
check_poly_list([0, 2, 36])
```
Take a look at [this](https://qm-thermodynamics.readthedocs.io/en/main/_static/Dealing_with_the_frequencies.html) tutorial to see other commands of the same type.
### Equation of state
As already seen above, the two commands for optimizing the equation of state at a given temperature are:
* ***eos_temp(tt)***: optimization of a *volume-integrated F(V)* equation
* ***bulk_dir(tt)***: optimization of a *P(V)* equation
The command ***static*** can be issued to get the *static* equation of state (3^rd order only): this is based on the static energy values only (no vibrational effects included):
```
static(plot=True)
```
A 4^th order Birch-Murnaghan EoS can also be optimized; see the relevant section in [basic_eos_tutorial](https://qm-thermodynamics.readthedocs.io/en/main/_static/basic_eos_tutorial.html) for details.
#### Fixing *Kp*
For BM3 optimization (with no fit of frequencies, as well as with all the possible fit types), the first derivative of the bulk modulus with respect to the pressure (*Kp*) can be fixed. This can be achieved by issuing the command ***set_fix(kp)*** whose argument is the fixed value given for *Kp* (default: 4). For instance:
```
set_fix(4.5)
bulk_dir(300)
```
**Note** that some commands can override the directive **set_fix** by providing the optional keyword argument ***fix***: precisely, if ***fix=0.***, *Kp* is optimized:
```
bulk_dir(300, fix=0.)
```
The command ***reset_fix*** deactivates the ***set_fix*** directive:
```
reset_fix()
bulk_dir(300)
```
**Note** however that in:
```
bulk_dir(300, fix=4.5)
```
the keyword ***fix*** (*not set equal to* 0.) forces *Kp* to be fixed at the given value (of 4.5). Other commands behave in a similar way; however ***eos_temp*** always optimizes *Kp* unless it is fixed with ***set_fix***.
**Note** Due to correlations among *K0, Kp* and *V0*, it is generally advised to keep *Kp* fixed when the bulk modulus is used to compute other properties depending on its variations with *P, V* or *T*. For instance:
```
reset_fix()
bulk_serie(300,600, 12, degree=1)
set_fix(4.5)
bulk_serie(300,600, 12, degree=1)
```
In particular, the function ***bulk_serie*** is used to compute the dependence of *K0* upon <i>T</i> (in a given <i>T</i> range). The $K_0(T)$ values are then fitted by a polynomial of a given degree; in the example, a linear fit (degree=1) over the <i>T</i> range gives $dK_0/dT = -0.0371$ with *Kp* fixed, or $-0.0348$ if *Kp* is not fixed.
### Thermal expansion
The command ***thermal_exp_p(t,p)*** can be used to compute the thermal expansion at a given temperature (<i>t</i>) and pressure (<i>p</i>). Note in the example that we are still under the ***set_fix*** directive issued above, as demonstrated by the ***fix_status*** command:
```
fix_status()
```
To inquire about the current fit status of the frequencies, the command ***fit_status*** must be given:
```
fit_status()
```
The thermal expansion at *T = 300K* and *P = 0 GPa* is:
```
thermal_exp_p(300, 0)
```
In this calculation *Kp* (which is an intermediate result of the algorithm) can be optimized by using the optional keyword ***fix***:
```
thermal_exp_p(300, 0, fix=0.)
```
Thermal expansion (at constant pressure) in a temperature range and at a given pressure, can be computed by the function ***alpha_serie***. In the example, $\alpha(T)$ is computed in the range $[20, 600K]$ (18 points) at a pressure of 0GPa. The ***fit*** optional keyword (default: True) requests a fit of the $\alpha(T)$ values by a polynomial function whose $T$ powers are specified in the input file:
~~~~~~~~~~~~~~
ALPHA
0. -2 -1 -0.5
~~~~~~~~~~~~~~
This means that the thermal expansion is fitted as:
$$\alpha(T) = k + aT^{-2} + bT^{-1} + cT^{-0.5}$$
where $k, a, b, c$ are the optimized constants.
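As a generic illustration of such a fit (plain SciPy with made-up $\alpha(T)$ values, not a command of the program):
```
import numpy as np
from scipy.optimize import curve_fit

def alpha_fit(T, k, a, b, c):
    # same functional form as above: k + a*T^-2 + b*T^-1 + c*T^-0.5
    return k + a*T**-2 + b*T**-1 + c*T**-0.5

# made-up thermal expansion values, for illustration only
T = np.linspace(20., 600., 18)
alpha = 2.5e-5 - 1.0e-2*T**-2 + 1.0e-4*T**-1 - 5.0e-4*T**-0.5

coeffs, _ = curve_fit(alpha_fit, T, alpha)
print(coeffs)   # optimized k, a, b, c
```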
```
alpha_serie(20, 600, 18, 0, prt=False, fit=True)
```
The array printed at the bottom of the cell contains the optimized parameters (following the order of the corresponding given powers). If the optional keyword ***prt*** is set to True (default), a list of the computed $\alpha$ values is provided.
### Entropy and specific heat
The function ***entropy_p*** computes the entropy at given temperature and pressure; it also provides the **specific heat at constant volume** ($C_V$). In the example, the entropy is computed at *T=300K* and *P=0GPa*:
```
entropy_p(300, 0)
```
The **specific heat at constant pressure** ($C_P$) is provided by the function ***cp***; the temperature and the pressure must be specified; the function also provides $C_V$, $\alpha$ and $K_0$:
```
cp(300,0, prt=True)
```
A list of values of $C_P$ in a given temperature range can be obtained by the function ***cp_serie***; in the example, we compute $C_P$ in the range $[40, 400K]$ (12 points) at *P=0GPa*; a fit is requested (optional keyword ***fit*** set to True, by default) with a polynomial whose powers are specified in the input file under the keyword *CP* (same structure as the ALPHA fit described above):
```
cp_serie(40,400,12, 0, prt=True)
```
To have a printout of the coefficients of the fitting polynomial, set ***prt=False*** to suppress the printing of the $C_P$ values at each temperature in the interval (to suppress the plot, too, use the ***graph*** optional keyword):
```
cp_serie(40,400,18, 0, prt=False, graph=False)
```
Such values of the coefficients can be saved in a variable:
```
cp_coeff=cp_serie(40,400,18, 0, prt=False, graph=False)
```
and used to compute $C_P$ at any temperature by using the polynomial fitting function ***cp_fun***:
```
print(cp_coeff)
round(cp_fun(300, *cp_coeff),2)
```
In the last example, the Python *round* command has been used to approximate the printed value returned by *cp_fun* to 2 decimal digits.
### LO-TO splitting
In the example above, the presence of the **LO** keyword in the *input.txt* file, followed by a file name (LO.txt in this case), requests the LO-TO splitting correction to the frequencies.
The LO.txt file contains two columns: the first one specifies the mode numbers (for those modes affected by LO-TO splitting only); the second column specifies the corresponding values of the split (in $cm^{-1}$).
The presence of the LO keyword in the input file produces *corrected* frequencies for the modes affected by LO-TO splitting. Precisely, the new frequencies $\nu_c$ are computed as follows
$$\nu_c = 2/3\cdot \nu_{TO} + 1/3\cdot \nu_{LO}$$
where $\nu_{LO}=\nu_{TO}+\Delta\nu$ and $\Delta\nu$ is the LO-TO split value. The TO values are found in the *frequencies* file specified by the keyword *FREQ*.
The corrected frequencies $\nu_c$ are internally stored in the *class* *lo*, along with a copy of the original TO frequencies, and used in all the subsequent calculations instead of the TO frequencies.
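As a small numerical illustration of the correction formula (with hypothetical TO frequencies and splits, not data handled by the program):
```
import numpy as np

# hypothetical TO frequencies (cm^-1) and LO-TO splits for three modes
nu_TO = np.array([350., 480., 910.])
delta_nu = np.array([12., 25., 40.])

nu_LO = nu_TO + delta_nu
nu_c = (2./3.)*nu_TO + (1./3.)*nu_LO
print(nu_c)   # corrected frequencies, used in place of nu_TO
```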
To deactivate the LO-TO correction, either the LO keyword can be deleted from the input file, or the method ***lo.off*** can be issued (***lo.on*** re-activates the correction):
```
lo.off()
```
To see the effect of the correction on the EoS parameters in this case:
```
reset_fix()
eos_temp(300,prt=False)
```
which is to be compared with:
```
lo.on()
eos_temp(300, prt=False)
```
The bulk modulus is not significantly affected by such LO-TO correction; however thermodynamic properties can be changed:
```
lo.off()
cp(300,0,prt=True)
```
to be compared with:
```
lo.on()
cp(300,0,prt=True)
```
### The Kieffer's model for the acoustic branches
The presence of the **KIEFFER** keyword in the *input.txt* file activates the evaluation of the contribution to the $F$ Helmholtz free energy coming from the acoustic phonon branches; such contribution is neglected in a standard calculation of frequencies at the center of the Brillouin zone. The keyword is followed by three numbers representing the frequencies of the acoustic modes at the Brillouin zone boundary (in cm^-1), that can be estimated from the knowledge of the elastic tensor of the crystal.
Apart from being activated by the keyword **KIEFFER**, the correction can be applied by the methods ***kieffer.on*** (to set up the calculation) and ***kieffer.freq*** to provide the frequencies (if not provided in the input file, or to change them). The method ***kieffer.plot*** can be used to plot the $F_{acoustic}(T)$ function:
```
kieffer.on()
kieffer.freq(300., 350., 400.)
kieffer.plot()
```
At any temperature, the contribution to $F$ from the acoustic branches can be recovered with the method ***kieffer.get_value***; for instance:
```
kieffer.get_value(300)
```
**Note** that the Kieffer's model does not take into account a possible variation of the frequencies of the acoustic modes with the unit cell volume; therefore it has no impact on all those properties depending uniquely upon the derivative of $F$ with respect to $V$; <i>e.g.</i>:
```
eos_temp(300, prt=False)
```
On the other hand, other properties can be significantly affected:
```
cp(300,0,prt=True)
```
to be compared with:
```
kieffer.off()
cp(300,0,prt=True)
```
## Thermodynamics and mineral equilibria
Functions exist to compute and to plot the Gibbs free energy $G=F+PV$ as a function of $T$ or $P$; for instance:
```
set_fix(4.39)
gibbs_serie_t(200,400,10,0., v0=py.v0, g0=py.g0)
```
In the above example, the Gibbs free energy of pyrope has been computed at 10 points, in the $[200, 400K]$ temperature range, at a fixed pressure of 0GPa.
The keywords v0 and g0, among the optional arguments of the function above, specify values from other sources of the molar volume and of the Gibbs free energy at the standard conditions. Such values are retrieved from a database and stored as attributes of the variable *py* of the *mineral* class. The ***py.info*** method prints out all the stored information concerning pyrope (in the present case, from the Holland & Powell database):
```
py.info()
```
To compute the $G$ energy in the range $[0, 10GPa]$ of pressure (10 points), at T=300K, use the function ***gibbs_serie_p***:
```
gibbs_serie_p(0, 10, 10, 300., v0=py.v0, g0=py.g0)
```
Below, the equilibrium curve in the *P/T* space, for the reaction ***enstatite + corundum <--> pyrope*** is computed.
The Gibbs free energy of each mineral phase involved is evaluated by integrating
$$dG = V{\rm d}P - S{\rm d}T$$
The contribution to $G$ from the integration over $T$ is computed at the reference pressure of 1 bar, by knowing the specific heat $C_P$ as a function of $T$ (at $P=1$ bar). The contribution to $G$ from the integration over $P$ is computed by knowing $V$ as a function of $P$ at the fixed temperature $T$; $V(T,P)$ is derived from the knowledge of the thermal expansion (as a function of $T$) and the equation of state, for which
$$K=K_0 + Kp\cdot P + b\cdot(T-T_{ref})$$
where $K_0$ is the bulk modulus at $P=1$ bar and $T=T_{ref}=298.15$ K, and $b$ is a constant.
Values for all these quantities are stored in the database defined within the program (Holland and Powell database, and printed by the methods ***py.info***, ***ens.info*** and ***cor.info***).
By issuing the command ***equilib***, specifying a range of temperatures (300, 600K) and a number of $T$ points in the range (12), the equilibrium pressure (in GPa) of the reaction at each temperature is computed:
```
equilib(300,600,12, prod=['py',1], rea=['ens', 1.5, 'cor',1])
```
**Note** the syntax of the command ***equilib***: two lists must be provided (enclosed in square brackets) and, precisely, the ***prod*** list of the products of the reaction, and the ***rea*** list of the reactants; the mineral species in each list are identified by their names, as stored in the internal database of the program, and are followed by the corresponding stoichiometric coefficients they have in the reaction. In this example, we have *3/2 ens + cor <--> py*
Quantum-mechanical based properties for the bulk modulus (and its pressure and temperature derivatives), thermal expansion, specific heat and entropy can be *uploaded*. The ***upload_mineral*** command can be used to make appropriate fits of all those quantities in a given temperature range (200-600K in the example), which are subsequently uploaded in the ***py*** *object*.
Here, the volume range for the frequency fitting is expanded to cover a higher temperature range at low pressure; the conditions under which the calculation is done are checked by the method ***info.show()***, and then the ***upload_mineral*** command is issued:
```
reset_fix()
set_volume_range(725,772)
info.show()
upload_mineral(200,600, mqm='py', volc=True)
```
**Note** the *mqm* keyword appearing as argument of the ***upload_mineral*** command: it specifies that *ab initio* thermodynamics data are referred to pyrope (data for enstatite and corundum are from the Holland & Powell database).
The new data for pyrope are:
```
py.info()
```
The new equilibrium curve for the reaction, in the *P/T* space, is then:
```
equilib(300,600,12, prod=['py',1], rea=['ens', 1.5, 'cor',1])
```
which is in very good agreement with that derived by using the Holland & Powell data for pyrope.
This notebook illustrates a problem related to errors in Landsat 7 images which probably occurs due to oversaturation or due to some other processing errors. This error is not observed in Landsat 8 or other image collections.
The error can be easily fixed when working with raw DN values, but it becomes much more difficult to clean these errors once DN radiance values are converted into reflectance values.
```
import math
import rasterio
from rasterio import plot
import pandas as pd
import numpy as np
import numpy.ma as ma
import datashader as ds
import datashader.transfer_functions as tf
%matplotlib inline
import matplotlib.pyplot as plt
def fix(radiance):
"""
Fix error in DN by skipping 255
"""
return radiance[radiance != 255]
def to_dn(radiance, props):
"""
Convert DN > Reflectance
"""
factor = np.sin(props['SUN_ELEVATION'] * math.pi / 180)
gain = props['MULT'] / factor
bias = props['ADD'] / factor
return radiance * gain + bias
def plot_values(data_frame):
canvas = ds.Canvas(plot_width=200, plot_height=200, x_range=(0, 0.5), y_range=(0, 0.5))
agg = canvas.points(data_frame, 'B2', 'B5')
return tf.shade(agg, cmap=['lightblue', 'darkblue'], how='eq_hist')
```
Original L7 files can be found in NASA/USGS Earth Explorer or in https://cloud.google.com/storage/docs/public-datasets/landsat
Only a small clipped part of the original scene is used here to save space. It was extracted using the following commands:
```
# !rio clip LE07_L1TP_002052_20170107_20170202_01_T1_B2.TIF B2.tif --bounds "371171.024141185 1254575.6977045687 377142.1236432 1260522.173282852"
# !rio clip LE07_L1TP_002052_20170107_20170202_01_T1_B5.TIF B5.tif --bounds "371171.024141185 1254575.6977045687 377142.1236432 1260522.173282852"
# read clipped band images
image_b2 = rasterio.open('B2.tif')
image_b5 = rasterio.open('B5.tif')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,7))
plot.show(image_b2, ax=ax1, cmap='Greens')
plot.show(image_b5, ax=ax2, cmap='Greys')
fig.tight_layout()
# it looks like B2 experiences oversaturation, but also in other bands
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
plot.show_hist(image_b2, ax=ax1, bins=50, lw=0.0, stacked=False, alpha=0.3, histtype='stepfilled', title='B2')
plot.show_hist(image_b5, ax=ax2, bins=50, lw=0.0, stacked=False, alpha=0.3, histtype='stepfilled', title='B5')
fig.tight_layout()
# parameters are from LE07_L1TP_002052_20170107_20170202_01_T1_MTL.txt
props_b2 = {
'SUN_ELEVATION': 47.44972921,
'MULT': 1.3075E-03, # REFLECTANCE_MULT_BAND_2
'ADD': -0.011783 # REFLECTANCE_ADD_BAND_2
}
props_b5 = {
'SUN_ELEVATION': 47.44972921,
'MULT': 1.7303E-03, # REFLECTANCE_MULT_BAND_5
'ADD': -0.015439 # REFLECTANCE_ADD_BAND_5
}
# read data
data_b2 = pd.DataFrame(image_b2.read().flatten(), columns=['B2'])
data_b5 = pd.DataFrame(image_b5.read().flatten(), columns=['B5'])
# convert DN > reflectance
data_b2_nofix = to_dn(data_b2, props_b2)
data_b5_nofix = to_dn(data_b5, props_b5)
# fix the error and then convert DN > reflectance
data_b2_fix = to_dn(fix(data_b2), props_b2)
data_b5_fix = to_dn(fix(data_b5), props_b5)
# combine bands
data_nofix = pd.concat([data_b2_nofix, data_b5_nofix], axis=1, join_axes=[data_b2_nofix.index])
data_fix = pd.concat([data_b2_fix, data_b5_fix], axis=1, join_axes=[data_b2_fix.index])
# save
data_nofix.to_csv('data_nofix.csv')
data_fix.to_csv('data_fix.csv')
```
Without removing 255, the resulting reflectance values generate spikes and this becomes a real problem, since they can't be removed anymore without using some special filtering. In a multitemporal analysis, this generates many spikes.
```
plot_values(data_nofix)
```
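If only reflectance values were available, one possible workaround (a sketch reusing `to_dn` and the `props_*` dictionaries defined above) is to mask the reflectance value that DN=255 maps to:
```
# reflectance value that a DN of 255 maps to, per band
sat_b2 = to_dn(255, props_b2)
sat_b5 = to_dn(255, props_b5)

# keep only pixels strictly below the saturated reflectance in both bands
data_cleaned = data_nofix[(data_nofix['B2'] < sat_b2) & (data_nofix['B5'] < sat_b5)]
plot_values(data_cleaned)
```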
A simple fix (masking out DN=255) results in a clean reflectance plot
```
plot_values(data_fix)
```
```
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'v1.0.2'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio>=0.6.0 -f https://download.pytorch.org/whl/torch_stable.html
## Grab the config we'll use in this example
!mkdir configs
```
# Introduction
This Speech Command recognition tutorial is based on the MatchboxNet model from the paper ["MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition"](https://arxiv.org/abs/2004.08531). MatchboxNet is a modified form of the QuartzNet architecture from the paper "[QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions](https://arxiv.org/pdf/1910.10261.pdf)" with a modified decoder head to suit classification tasks.
The notebook will follow the steps below:
- Dataset preparation: Preparing Google Speech Commands dataset
- Audio preprocessing (feature extraction): signal normalization, windowing, (log) spectrogram (or mel scale spectrogram, or MFCC)
- Data augmentation using SpecAugment "[SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://arxiv.org/abs/1904.08779)" to increase the number of data samples.
- Develop a small Neural classification model that can be trained efficiently.
- Model training on the Google Speech Commands dataset in NeMo.
- Evaluation of error cases of the model by listening to the samples
```
# Some utility imports
import os
from omegaconf import OmegaConf
# This is where the Google Speech Commands directory will be placed.
# Change this if you don't want the data to be extracted in the current directory.
# Select the version of the dataset required as well (can be 1 or 2)
DATASET_VER = 1
data_dir = './google_dataset_v{0}/'.format(DATASET_VER)
if DATASET_VER == 1:
MODEL_CONFIG = "matchboxnet_3x1x64_v1.yaml"
else:
MODEL_CONFIG = "matchboxnet_3x1x64_v2.yaml"
if not os.path.exists(f"configs/{MODEL_CONFIG}"):
!wget -P configs/ "https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/asr/conf/matchboxnet/{MODEL_CONFIG}"
```
# Data Preparation
We will be using the open-source Google Speech Commands Dataset (we will use V1 of the dataset for this tutorial, but only minor changes are required to support the V2 dataset). The scripts below will download the dataset and convert it to a format suitable for use with NeMo.
## Download the dataset
The dataset must be prepared using the scripts provided under the `{NeMo root directory}/scripts` sub-directory.
Run the following command below to download the data preparation script and execute it.
**NOTE**: You should have at least 4GB of disk space available if you’ve used --data_version=1; and at least 6GB if you used --data_version=2. Also, it will take some time to download and process, so go grab a coffee.
**NOTE**: You may additionally pass a `--rebalance` flag at the end of the `process_speech_commands_data.py` script to rebalance the class samples in the manifest.
```
if not os.path.exists("process_speech_commands_data.py"):
!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/dataset_processing/process_speech_commands_data.py
```
### Preparing the manifest file
The manifest file is a simple file that has the full path to the audio file, the duration of the audio file, and the label that is assigned to that audio file.
This notebook is only a demonstration, and therefore we will use the `--skip_duration` flag to speed up construction of the manifest file.
**NOTE: When replicating the results of the paper, do not use this flag and prepare the manifest file with correct durations.**
```
!mkdir {data_dir}
!python process_speech_commands_data.py --data_root={data_dir} --data_version={DATASET_VER} --skip_duration --log
print("Dataset ready !")
```
## Prepare the path to manifest files
```
dataset_path = 'google_speech_recognition_v{0}'.format(DATASET_VER)
dataset_basedir = os.path.join(data_dir, dataset_path)
train_dataset = os.path.join(dataset_basedir, 'train_manifest.json')
val_dataset = os.path.join(dataset_basedir, 'validation_manifest.json')
test_dataset = os.path.join(dataset_basedir, 'validation_manifest.json')
```
## Read a few rows of the manifest file
Manifest files are the data structure used by NeMo to declare a few important details about the data:
1) `audio_filepath`: Refers to the path to the raw audio file <br>
2) `command`: The class label (or speech command) of this sample <br>
3) `duration`: The length of the audio file, in seconds.
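As an illustration, each manifest entry is a single JSON object per line; a minimal sketch of how one could be written (the path below is hypothetical, real entries are produced by the processing script above):
```python
import json

# hypothetical entry; real entries are produced by process_speech_commands_data.py
entry = {"audio_filepath": "google_dataset_v1/path/to/sample.wav",
         "duration": 1.0,
         "command": "yes"}
print(json.dumps(entry))
```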
```
!head -n 5 {train_dataset}
```
# Training - Preparation
We will be training a MatchboxNet model from the paper ["MatchboxNet: 1D Time-Channel Separable Convolutional Neural Network Architecture for Speech Commands Recognition"](https://arxiv.org/abs/2004.08531). The benefit of MatchboxNet over JASPER models is that they use 1D Time-Channel Separable Convolutions, which greatly reduce the number of parameters required to obtain good model accuracy.
MatchboxNet models generally follow the model definition pattern QuartzNet-[BxRXC], where B is the number of blocks, R is the number of convolutional sub-blocks, and C is the number of channels in these blocks. Each sub-block contains a 1-D masked convolution, batch normalization, ReLU, and dropout.
An image of QuartzNet, the base configuration of MatchboxNet models, is provided below.
<p align="center">
<img src="https://developer.nvidia.com/blog/wp-content/uploads/2020/05/quartznet-model-architecture-1-625x742.png">
</p>
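To make the idea of a 1D time-channel separable convolution concrete, below is a minimal PyTorch sketch of such a sub-block (an illustration only, not NeMo's actual implementation, which additionally uses masked convolutions):
```python
import torch
import torch.nn as nn

class TCSConvBlock(nn.Module):
    # depthwise conv along time (one filter per channel) + pointwise 1x1 conv across channels,
    # followed by batch norm, ReLU and dropout, as described above
    def __init__(self, in_ch, out_ch, kernel_size, dropout=0.1):
        super().__init__()
        self.depthwise = nn.Conv1d(in_ch, in_ch, kernel_size,
                                   padding=kernel_size // 2, groups=in_ch)
        self.pointwise = nn.Conv1d(in_ch, out_ch, kernel_size=1)
        self.bn = nn.BatchNorm1d(out_ch)
        self.act = nn.ReLU()
        self.drop = nn.Dropout(dropout)

    def forward(self, x):  # x: (batch, channels, time)
        x = self.pointwise(self.depthwise(x))
        return self.drop(self.act(self.bn(x)))

block = TCSConvBlock(64, 64, kernel_size=13)
print(block(torch.randn(2, 64, 128)).shape)  # torch.Size([2, 64, 128])
```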
```
# NeMo's "core" package
import nemo
# NeMo's ASR collection - this collection contains complete ASR models and
# building blocks (modules) for ASR
import nemo.collections.asr as nemo_asr
```
## Model Configuration
The MatchboxNet Model is defined in a config file which declares multiple important sections.
They are:
1) `model`: All arguments that will relate to the Model - preprocessors, encoder, decoder, optimizer and schedulers, datasets and any other related information
2) `trainer`: Any argument to be passed to PyTorch Lightning
```
# This line will print the entire config of the MatchboxNet model
config_path = f"configs/{MODEL_CONFIG}"
config = OmegaConf.load(config_path)
config = OmegaConf.to_container(config, resolve=True)
config = OmegaConf.create(config)
print(OmegaConf.to_yaml(config))
# Preserve some useful parameters
labels = config.model.labels
sample_rate = config.sample_rate
```
### Setting up the datasets within the config
As you may notice, there are a few config dictionaries called `train_ds`, `validation_ds` and `test_ds`. These are the configurations used to set up the Dataset and DataLoader of the corresponding data split.
```
print(OmegaConf.to_yaml(config.model.train_ds))
```
### `???` inside configs
You will often notice that some configs have `???` in place of paths. This is used as a placeholder so that the user can change the value at a later time.
Let's add the paths to the manifests to the config above.
```
config.model.train_ds.manifest_filepath = train_dataset
config.model.validation_ds.manifest_filepath = val_dataset
config.model.test_ds.manifest_filepath = test_dataset
```
## Building the PyTorch Lightning Trainer
NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem!
Let's first instantiate a Trainer object!
```
import torch
import pytorch_lightning as pl
print("Trainer config - \n")
print(OmegaConf.to_yaml(config.trainer))
# Lets modify some trainer configs for this demo
# Checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
config.trainer.gpus = cuda
# Reduces maximum number of epochs to 5 for quick demonstration
config.trainer.max_epochs = 5
# Remove distributed training flags
config.trainer.accelerator = None
trainer = pl.Trainer(**config.trainer)
```
## Setting up a NeMo Experiment
NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it !
```
from nemo.utils.exp_manager import exp_manager
exp_dir = exp_manager(trainer, config.get("exp_manager", None))
# The exp_dir provides a path to the current experiment for easy access
exp_dir = str(exp_dir)
exp_dir
```
## Building the MatchboxNet Model
MatchboxNet is an ASR model with a classification task - it generates one label for the entire provided audio stream. Therefore we encapsulate it inside the `EncDecClassificationModel` as follows.
```
asr_model = nemo_asr.models.EncDecClassificationModel(cfg=config.model, trainer=trainer)
```
# Training a MatchboxNet Model
As MatchboxNet is inherently a PyTorch Lightning Model, it can easily be trained in a single line - `trainer.fit(model)` !
### Monitoring training progress
Before we begin training, let's first create a Tensorboard visualization to monitor progress
```
try:
from google import colab
COLAB_ENV = True
except (ImportError, ModuleNotFoundError):
COLAB_ENV = False
# Load the TensorBoard notebook extension
if COLAB_ENV:
%load_ext tensorboard
else:
print("To use tensorboard, please use this notebook in a Google Colab environment.")
if COLAB_ENV:
%tensorboard --logdir {exp_dir}
else:
print("To use tensorboard, please use this notebook in a Google Colab environment.")
```
### Training for 5 epochs
We see below that the model begins to get modest scores on the validation set after just 5 epochs of training
```
trainer.fit(asr_model)
```
### Evaluation on the Test set
Let's compute the final score on the test set via `trainer.test(model)`
```
trainer.test(asr_model, ckpt_path=None)
```
# Fast Training
We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.
For multi-GPU training, take a look at [the PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/latest/advanced/multi_gpu.html)
For mixed-precision training, take a look at [the PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/advanced/amp.html)
```python
# Mixed precision:
trainer = Trainer(amp_level='O1', precision=16)
# Trainer with a distributed backend:
trainer = Trainer(gpus=2, num_nodes=2, accelerator='ddp')
# Of course, you can combine these flags as well.
```
# Evaluation of incorrectly predicted samples
Given that we have a trained model, which performs reasonably well, let's try to listen to the samples where the model is least confident in its predictions.
For this, we need the support of the librosa library.
**NOTE**: The following code depends on librosa. To install it, run the following code block first.
```
!pip install librosa
```
## Extract the predictions from the model
We want the actual logits of the model instead of just the final evaluation score, so we define a function that performs the forward step for us without computing the final loss, and simply extracts the logits for each batch of samples provided.
## Accessing the data loaders
We can utilize the `setup_test_data` method in order to instantiate a data loader for the dataset we want to analyze.
For convenience, we can access these instantiated data loaders using the following accessors - `asr_model._train_dl`, `asr_model._validation_dl` and `asr_model._test_dl`.
```
asr_model.setup_test_data(config.model.test_ds)
test_dl = asr_model._test_dl
```
## Partial Test Step
Below we define a utility function to perform most of the test step. For reference, the test step is defined as follows:
```python
def test_step(self, batch, batch_idx, dataloader_idx=0):
audio_signal, audio_signal_len, labels, labels_len = batch
logits = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy(logits=logits, labels=labels)
return {'test_loss': loss_value, 'test_correct_counts': correct_counts, 'test_total_counts': total_counts}
```
```
@torch.no_grad()
def extract_logits(model, dataloader):
logits_buffer = []
label_buffer = []
# Follow the above definition of the test_step
for batch in dataloader:
audio_signal, audio_signal_len, labels, labels_len = batch
logits = model(input_signal=audio_signal, input_signal_length=audio_signal_len)
logits_buffer.append(logits)
label_buffer.append(labels)
print(".", end='')
print()
print("Finished extracting logits !")
logits = torch.cat(logits_buffer, 0)
labels = torch.cat(label_buffer, 0)
return logits, labels
cpu_model = asr_model.cpu()
cpu_model.eval()
logits, labels = extract_logits(cpu_model, test_dl)
print("Logits:", logits.shape, "Labels :", labels.shape)
# Compute accuracy - `_accuracy` is a PyTorch Lightning Metric !
acc = cpu_model._accuracy(logits=logits, labels=labels)
print("Accuracy : ", float(acc[0]*100))
```
## Filtering out incorrect samples
Let us now filter out the incorrectly labeled samples from the total set of samples in the test set
```
import librosa
import json
import IPython.display as ipd
# First let's create a utility class to remap the integer class labels to actual string label
class ReverseMapLabel:
def __init__(self, data_loader):
self.label2id = dict(data_loader.dataset.label2id)
self.id2label = dict(data_loader.dataset.id2label)
def __call__(self, pred_idx, label_idx):
return self.id2label[pred_idx], self.id2label[label_idx]
# Next, let's get the indices of all the incorrectly labeled samples
sample_idx = 0
incorrect_preds = []
rev_map = ReverseMapLabel(test_dl)
# Remember, evaluated_tensor = (loss, logits, labels)
probs = torch.softmax(logits, dim=-1)
probas, preds = torch.max(probs, dim=-1)
total_count = cpu_model._accuracy.total_counts_k[0]
incorrect_ids = (preds != labels).nonzero()
for idx in incorrect_ids:
proba = float(probas[idx][0])
pred = int(preds[idx][0])
label = int(labels[idx][0])
idx = int(idx[0]) + sample_idx
incorrect_preds.append((idx, *rev_map(pred, label), proba))
print(f"Num test samples : {total_count.item()}")
print(f"Num errors : {len(incorrect_preds)}")
# First lets sort by confidence of prediction
incorrect_preds = sorted(incorrect_preds, key=lambda x: x[-1], reverse=False)
```
## Examine a subset of incorrect samples
Let's print out the (test id, predicted label, ground truth label, confidence) tuple of first 20 incorrectly labeled samples
```
for incorrect_sample in incorrect_preds[:20]:
print(str(incorrect_sample))
```
## Define a threshold below which we designate a model's prediction as "low confidence"
```
# Filter out how many such samples exist
low_confidence_threshold = 0.25
count_low_confidence = len(list(filter(lambda x: x[-1] <= low_confidence_threshold, incorrect_preds)))
print(f"Number of low confidence predictions : {count_low_confidence}")
```
## Let's hear the samples which the model has least confidence in !
```
# First let's create a helper function to parse the manifest files
def parse_manifest(manifest):
data = []
for line in manifest:
line = json.loads(line)
data.append(line)
return data
# Next, let's create a helper function to actually listen to certain samples
def listen_to_file(sample_id, pred=None, label=None, proba=None):
# Load the audio waveform using librosa
filepath = test_samples[sample_id]['audio_filepath']
audio, sample_rate = librosa.load(filepath)
if pred is not None and label is not None and proba is not None:
print(f"Sample : {sample_id} Prediction : {pred} Label : {label} Confidence = {proba: 0.4f}")
else:
print(f"Sample : {sample_id}")
return ipd.Audio(audio, rate=sample_rate)
# Now let's load the test manifest into memory
test_samples = []
with open(test_dataset, 'r') as test_f:
test_samples = test_f.readlines()
test_samples = parse_manifest(test_samples)
# Finally, let's listen to all the audio samples where the model made a mistake
# Note: This list of incorrect samples may be quite large, so you may choose to subsample `incorrect_preds`
count = min(count_low_confidence, 20) # replace this line with just `count_low_confidence` to listen to all samples with low confidence
for sample_id, pred, label, proba in incorrect_preds[:count]:
ipd.display(listen_to_file(sample_id, pred=pred, label=label, proba=proba))
```
# Fine-tuning on a new dataset
So far we have trained our model on all 30/35 classes of the Google Speech Commands dataset (v1/v2).
We will now show how to fine-tune the trained model on a subset of these classes.
## Preparing the data-subsets
Let's select 2 of the classes, `yes` and `no` and prepare our manifests with this dataset.
```
import json
def extract_subset_from_manifest(name: str, manifest_path: str, labels: list):
manifest_dir = os.path.split(manifest_path)[0]
labels = set(labels)
manifest_values = []
print(f"Parsing manifest: {manifest_path}")
with open(manifest_path, 'r') as f:
for line in f:
val = json.loads(line)
if val['command'] in labels:
manifest_values.append(val)
print(f"Number of files extracted from dataset: {len(manifest_values)}")
outpath = os.path.join(manifest_dir, name)
with open(outpath, 'w') as f:
for val in manifest_values:
json.dump(val, f)
f.write("\n")
f.flush()
print("Manifest subset written to path :", outpath)
print()
return outpath
labels = ["yes", "no"]
train_subdataset = extract_subset_from_manifest("train_subset.json", train_dataset, labels)
val_subdataset = extract_subset_from_manifest("val_subset.json", val_dataset, labels)
test_subdataset = extract_subset_from_manifest("test_subset.json", test_dataset, labels)
```
## Saving/Restoring a checkpoint
There are multiple ways to save and load models in NeMo. Since all NeMo models are inherently Lightning Modules, we can use the standard way that PyTorch Lightning saves and restores models.
NeMo also provides a more advanced model save/restore format, which encapsulates all the parts of the model that are required to restore that model for immediate use.
In this example, we will explore both ways of saving and restoring models, but we will focus on the PyTorch Lightning method.
### Saving and Restoring via PyTorch Lightning Checkpoints
When using NeMo for training, it is advisable to utilize the `exp_manager` framework. It is tasked with handling checkpointing and logging (Tensorboard as well as WandB optionally!), as well as dealing with multi-node and multi-GPU logging.
Since we utilized the `exp_manager` framework above, we have access to the directory where the checkpoints exist.
`exp_manager` with the default settings will save multiple checkpoints for us -
1) A few checkpoints from certain steps of training. They will have `--val_loss=` tags
2) A checkpoint at the last epoch of training, denoted by `-last`.
3) If the model finishes training, it will also have a `--end` checkpoint.
```
import glob
print(exp_dir)
# Let's list all the checkpoints we have
checkpoint_dir = os.path.join(exp_dir, 'checkpoints')
checkpoint_paths = list(glob.glob(os.path.join(checkpoint_dir, "*.ckpt")))
checkpoint_paths
# We want the checkpoint saved after the final step of training
final_checkpoint = list(filter(lambda x: "-last.ckpt" in x, checkpoint_paths))[0]
print(final_checkpoint)
```
### Restoring from a PyTorch Lightning checkpoint
To restore a model, use the `LightningModule.load_from_checkpoint()` class method.
```
restored_model = nemo_asr.models.EncDecClassificationModel.load_from_checkpoint(final_checkpoint)
```
## Prepare the model for fine-tuning
Remember, the original model was trained for a 30/35-way classification task. Now we require only a subset of these classes, so we need to modify the decoder head to support fewer classes.
We can do this easily with the convenient function `EncDecClassificationModel.change_labels(new_label_list)`.
By performing this step, we discard the old decoder head but still preserve the encoder!
```
restored_model.change_labels(labels)
```
### Prepare the data loaders
Upon restoration, the model will not attempt to set up any data loaders.
This is so that we can manually set up whichever datasets we want - train and val to fine-tune the model, test to just evaluate, or all three to do both!
The entire config that we used before can still be accessed via `ModelPT.cfg`, so we will use it to set up our data loaders. This also gives us the opportunity to set any additional parameters we wish!
```
import copy
train_subdataset_cfg = copy.deepcopy(restored_model.cfg.train_ds)
val_subdataset_cfg = copy.deepcopy(restored_model.cfg.validation_ds)
test_subdataset_cfg = copy.deepcopy(restored_model.cfg.test_ds)
# Set the paths to the subset of the dataset
train_subdataset_cfg.manifest_filepath = train_subdataset
val_subdataset_cfg.manifest_filepath = val_subdataset
test_subdataset_cfg.manifest_filepath = test_subdataset
# Setup the data loader for the restored model
restored_model.setup_training_data(train_subdataset_cfg)
restored_model.setup_multiple_validation_data(val_subdataset_cfg)
restored_model.setup_multiple_test_data(test_subdataset_cfg)
# Check data loaders are correct
print("Train dataset labels :", restored_model._train_dl.dataset.labels)
print("Val dataset labels :", restored_model._validation_dl.dataset.labels)
print("Test dataset labels :", restored_model._test_dl.dataset.labels)
```
## Setting up a new Trainer and Experiment Manager
A restored model has a utility method to attach the Trainer object to it, which is necessary in order to correctly set up the optimizer and scheduler!
**Note**: The restored model does not carry its trainer config with it, so it is necessary to create a new Trainer object suitable for the environment where the model is being trained. The template can be replicated from any of the training scripts.
Here, since we already had the previous config object that prepared the trainer, we could have used it, but for demonstration, we will set up the trainer config manually.
```
# Setup the new trainer object
# Let's modify some trainer configs for this demo
# Checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
trainer_config = OmegaConf.create(dict(
gpus=cuda,
max_epochs=5,
max_steps=None, # computed at runtime if not set
num_nodes=1,
accumulate_grad_batches=1,
checkpoint_callback=False, # Provided by exp_manager
logger=False, # Provided by exp_manager
log_every_n_steps=1, # Interval of logging.
val_check_interval=1.0, # Set to 0.25 to check 4 times per epoch, or an int for number of iterations
))
print(OmegaConf.to_yaml(trainer_config))
trainer_finetune = pl.Trainer(**trainer_config)
```
### Setting the trainer to the restored model
All NeMo models provide a convenience method `set_trainer()` to set up the trainer after restoration.
```
restored_model.set_trainer(trainer_finetune)
exp_dir_finetune = exp_manager(trainer_finetune, config.get("exp_manager", None))
exp_dir_finetune = str(exp_dir_finetune)
exp_dir_finetune
```
## Setup optimizer + scheduler
For a fine-tuning experiment, let's set up the optimizer and scheduler!
We will use a much lower learning rate than before, and also swap out the polynomial hold-decay scheduler for CosineAnnealing.
```
optim_sched_cfg = copy.deepcopy(restored_model.cfg.optim)
# Struct mode prevents us from popping off elements from the config, so let's disable it
OmegaConf.set_struct(optim_sched_cfg, False)
# Let's change the maximum learning rate to the previous minimum learning rate
optim_sched_cfg.lr = 0.001
# Let's change the scheduler
optim_sched_cfg.sched.name = "CosineAnnealing"
# "power" isn't applicable to CosineAnnealing, so let's remove it
optim_sched_cfg.sched.pop('power')
# "hold_ratio" isn't applicable to CosineAnnealing, so let's remove it
optim_sched_cfg.sched.pop('hold_ratio')
# Set "min_lr" to a lower value
optim_sched_cfg.sched.min_lr = 1e-4
print(OmegaConf.to_yaml(optim_sched_cfg))
# Now let's update the optimizer settings
restored_model.setup_optimization(optim_sched_cfg)
# We can also just directly replace the config inplace if we choose to
restored_model.cfg.optim = optim_sched_cfg
```
## Fine-tune training step
We fine-tune on the subset classification problem. Note that the model was originally trained on these classes; the subset defined here has already been seen during the training above.
When fine-tuning on a truly new dataset, we will not see such a dramatic improvement in performance. However, it should still converge a little faster than if it were trained from scratch.
### Monitor training progress via Tensorboard
```
if COLAB_ENV:
%tensorboard --logdir {exp_dir_finetune}
else:
print("To use tensorboard, please use this notebook in a Google Colab environment.")
```
### Fine-tuning for 5 epochs
```
trainer_finetune.fit(restored_model)
```
### Evaluation on the Test set
Let's compute the final score on the test set via `trainer.test(model)`
```
trainer_finetune.test(restored_model, ckpt_path=None)
```
## Advanced Usage: Exporting a model in its entirety
While most models can be easily serialized via the Experiment Manager as a PyTorch Lightning checkpoint, there are certain models where this is insufficient.
Consider the case where a Model contains artifacts such as tokenizers or other intermediate file objects that cannot be so easily serialized into a checkpoint.
For such cases, NeMo offers two utility functions that enable serialization of a Model + artifacts - `save_to` and `restore_from`.
Further documentation regarding these methods can be obtained from the documentation pages on NeMo.
```
import tarfile
# Save a model as a tarfile
restored_model.save_to(os.path.join(exp_dir_finetune, "model.nemo"))
# The above object is just a tarfile which can store additional artifacts.
with tarfile.open(os.path.join(exp_dir_finetune, 'model.nemo')) as blob:
for item in blob:
print(item)
# Restore a model from a tarfile
restored_model_2 = nemo_asr.models.EncDecClassificationModel.restore_from(os.path.join(exp_dir_finetune, "model.nemo"))
```
## Conclusion
Once the model has been restored, either via a PyTorch Lightning checkpoint or via the `restore_from` methods, one can finetune by following the above general steps.
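As a compact recap of the workflow above, the essential steps can be chained together as follows. This is only a sketch that reuses the objects created earlier in this notebook (`final_checkpoint`, `labels`, the subset configs, and `trainer_finetune`); it introduces no new APIs.
```
# Condensed fine-tuning recipe using objects defined earlier in this notebook
model = nemo_asr.models.EncDecClassificationModel.load_from_checkpoint(final_checkpoint)
model.change_labels(labels)                            # replace the decoder head for the new label set
model.setup_training_data(train_subdataset_cfg)        # attach the subset data loaders
model.setup_multiple_validation_data(val_subdataset_cfg)
model.setup_multiple_test_data(test_subdataset_cfg)
model.set_trainer(trainer_finetune)                    # attach a Trainer before fitting
trainer_finetune.fit(model)
```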
# Python Wrapper for CMR
`A python library to interface with CMR - Collection Search Demo`
This demo will show how to perform a **collection** search against CMR while inside a notebook.
## Loading the library
From the command line, make sure you call `runme.sh -p -i` to both package and install the library through pip3.
## Load modules
```
import cmr.search.collection as coll
```
## Get Online Help
At least some understanding of the CMR API will be needed from time to time. To assist with that, the following call can be used to open a browser window to the API documentation. You can also pass in an HTML anchor from that page to jump directly to a specific section.
```
coll.open_api()
```
## Searching
### Perform a Basic Search
Search for all records that contain the word 'salt'.
```
results = coll.search({'keyword':'salt'})
print("Found {} records.".format(len(results)))
for i in results:
print (i)
```
### A Search with Columns Filtered from the Result
Reduce the result columns by showing only the collection curation fields and dropping the entry title.
This search also runs against UAT.
```
params = {}
#params['provider'] = 'SEDAC' # 276 records
#params['keyword'] = 'fish food' # 131 records
params['keyword'] = 'salt' # 290 records
config={'env':'uat'} # 290 in prod, 49 in UAT as of 2020-12-01
results = coll.search(params,
filters=[coll.collection_core_fields,
coll.drop_fields('EntryTitle')],
limit=1000,
config=config)
print("Found {} records.".format(len(results)))
for i in results:
print (i)
```
### Find a lot of collection records
This should find just over a full page (2000) of results.
```
params = {}
results = coll.search(params,
filters=[coll.collection_core_fields, coll.drop_fields('EntryTitle')],
limit=2048,
config={'env':'uat'})
print("Found {} records.".format(len(results)))
for i in results:
print (i)
```
## Applying Filters after a search
Internally the code calls apply_filters(), but it can also be called manually as shown below. One reason to do this is to download the data once and then apply filters as needed.
```
params = {}
raw_results = coll.search(params, limit=2, config={'env':'uat'})
clean_results = coll.apply_filters([coll.collection_core_fields,coll.drop_fields('EntryTitle')], raw_results)
print("Found {} records.".format(len(clean_results)))
for i in clean_results:
print (i)
```
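Because the raw records are already in memory, a different filter list can be applied to the same `raw_results` without issuing another search. For example, keeping the core fields but not dropping `EntryTitle` (a small sketch that reuses only the filters shown above):
```
# Apply a different filter list to the records downloaded above
core_results = coll.apply_filters([coll.collection_core_fields], raw_results)
print("Found {} records.".format(len(core_results)))
for i in core_results:
    print(i)
```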
## Sorting
```
def sorted_search(params):
results = coll.search(params, filters=[coll.collection_core_fields], limit=11)
print("Found {} records.".format(len(results)))
for i in results:
print (i)
#params = {'keyword':'modis', 'sort_key': 'instrument'}
sorted_search({'keyword':'modis', 'sort_key': 'instrument'})
print('\nvs\n')
sorted_search({'keyword':'modis', 'sort_key': '-instrument'})
```
### Help with Sort Keys
Can't remember the sort keys? Look them up:
```
coll.open_api("#sorting-collection-results")
```
## Getting Help
Print out all the docstrings; you can filter by a prefix if you want.
```
print(coll.help_text())
```
----
EOF
# When GM Assumption 4 Is Not Satisfied
If you come here without expecting Japanese, please click [Google translated version](https://translate.google.com/translate?hl=&sl=ja&tl=en&u=https%3A%2F%2Fpy4etrics.github.io%2F18_Zero_Conditional_Mean.html) in English or the language of your choice.
---
```
from scipy.stats import multivariate_normal, norm, uniform
import numpy as np
import pandas as pd
from statsmodels.formula.api import ols
import matplotlib.pyplot as plt
from numba import njit
```
## Explanation
Assumption 4: Zero conditional mean $\text{E}\left(u|X\right)=0$
* When this assumption fails, the `OLS` estimator is neither unbiased nor consistent.
* In empirical work in economics this assumption is often violated. The three main reasons are:
    * omitted variables
    * measurement error
    * simultaneous equations
Below we explain each of these three and use simulations to confirm the bias that arises.
Before that, we build intuition through simulation for the difference between the cases where Assumption 4 holds and where it does not.
## Simulation
### Preparation
Set the true values of the parameters.
```
b0=1.0
b1=1.0
```
Create the simulation function.
```
def sim_a4(n, m):  # n = sample size, m = covariance
    rv_mean = [4, 0]  # means of x and u
    rv_cov = [[1.0, m],   # covariance matrix of x and u
              [m, 0.1]]   # Cov(x,u) = m
    rv = multivariate_normal.rvs(rv_mean, rv_cov, size=n)  # draw n pairs of x and u
    x = rv[:,0]  # explanatory variable
    u = rv[:,1]  # error term
    y = b0 + b1*x + u  # dependent variable
    df = pd.DataFrame({'Y':y, 'X':x})  # create the DataFrame
    res = ols(formula='Y ~ X', data=df).fit()  # OLS estimation
    return x, y, u, res.params[0], res.params[1]  # return values
```
The key role here is played by `m`, which represents the covariance between the explanatory variable and the error term.
### When Assumption 4 holds: $m=0$
```
x_0, y_0, u_0, b0hat_0, b1hat_0 = sim_a4(100, m=0)
```
The population error term $u$ and the explanatory variable $x$:
```
plt.scatter(x_0,u_0)
plt.axhline(0)
pass
```
Let us compute the correlation coefficient.
```
np.corrcoef(x_0,u_0)
```
The correlation is a very small number, showing no particular pattern. Next, let us compare the population regression line with the OLS regression line.
```
xx = np.linspace(min(x_0),max(x_0),100)
plt.scatter(x_0,y_0)
plt.plot(xx,b0hat_0+b1hat_0*xx, 'k', label='OLS')
plt.plot(xx,b0+b1*xx,'r', label='Pop Regression Line')
plt.legend()
pass
```
Because the estimator is unbiased, the two lines are almost identical.
### When Assumption 4 fails: $m\neq 0$
```
x_1, y_1, u_1, b0hat_1, b1hat_1 = sim_a4(100, m=0.25)
```
Let us plot the population error term $u$ against the explanatory variable $x$.
```
plt.scatter(x_1,u_1)
plt.axhline(0)
pass
np.corrcoef(x_1,u_1)
```
A strong positive correlation is evident. Let us compare the population regression line with the OLS regression line.
```
xx = np.linspace(min(x_1),max(x_1),100)
plt.scatter(x_1,y_1)
plt.plot(xx,b0hat_1+b1hat_1*xx, 'k', label='OLS')
plt.plot(xx,b0+b1*xx, 'r', label='Pop Regression Line')
plt.legend()
pass
```
The two lines clearly differ. This is because GM Assumption 4 fails, so the estimator is not unbiased. In this case, consistency also fails. Let us confirm this by setting the sample size `n` to `100000`.
```
x_1, y_1, u_1, b0hat_1, b1hat_1 = sim_a4(100_000, m=0.25)
xx = np.linspace(min(x_1),max(x_1),100)
plt.scatter(x_1,y_1, alpha=0.1) # see the explanation below
plt.plot(xx,b0hat_1+b1hat_1*xx, 'k', label='OLS')
plt.plot(xx,b0+b1*xx,'r', label='Pop Regression Line')
plt.legend()
pass
```
On the third line from the top, `alpha=0.1` is an argument that sets the transparency of the scatter-plot markers and takes a value between `0` and `1`. The default is `1`, and `0` means fully transparent.
## Omitted Variables
### Omitted variable bias
Suppose the population model is given by:
$$y=\beta_0+\beta_1 x_1+\beta_2x_2+\beta_3x_3+u\qquad\quad(\text{population model})$$
The model actually estimated (with $x_3$ omitted):
$$y=\gamma_0+\gamma_1 x_1+\gamma_2x_2+\eta\qquad\quad(\text{estimated model})$$
Further, assume the following relationship between $x_1$ and $x_3$:
$$x_3=\delta_0+\delta_1 x_1+\epsilon\qquad\quad(\text{assumption})$$
Substituting this into the population model gives:
$$y=(\beta_0+\beta_3\delta_0)+(\beta_1+\beta_3\delta_1)x_1+\beta_2x_2+\tilde{\eta}$$
where $\tilde{\eta}\equiv\beta_3\epsilon+u$. The estimated model computes its coefficients based on this relationship.
**(Result 1)**
Using the estimated model,
* $\hat{\gamma}_0$ estimates $\beta_0+\beta_3\delta_0$
* $\hat{\gamma}_1$ estimates $\beta_1+\beta_3\delta_1$
* $\hat{\gamma}_2$ estimates $\beta_2$
so the estimates of $\gamma_0$ and $\gamma_1$ are biased. On the other hand, $\hat{\gamma}_2$ is not biased, because variables (other than the constant) that are uncorrelated with the omitted variable do not pick up the bias.
**(Result 2)**
* If the omitted variable is uncorrelated with the explanatory variables in the regression, no bias arises!
**(Reason)**
GM Assumption 4 is not satisfied. From the population model, the estimated model, and the assumed relationship,
$$
\eta=\beta_3x_3+u=\beta_3(\delta_0+\delta_1x_1+\epsilon)+u
\quad\Rightarrow\quad
\tilde{\eta}=\eta-\beta_3(\delta_0+\delta_1x_1)
$$
which shows that $x_1$ and $\eta$, the error term of the estimated model, are correlated.
### Simulation 1: computing the estimates
Run a one-shot simulation to confirm that the bias arises.
```
# population parameters
b0 = 1
b1 = 1
b2 = 1
b3 = 1
# sample size
n = 1000
# explanatory variables x1, x2
x1 = np.random.normal(4, 1, size=n)
x2 = np.random.uniform(1, 10, size=n)
# explanatory variable x3
e = np.random.normal(size=n)
m = 2.0  # captures the correlation with x1
x3 = 1 + m*x1 + e
# population error term
u = np.random.normal(size=n)
y = b0 + b1*x1 + b2*x2 + b3*x3 + u
# sample DataFrame
df_sample = pd.DataFrame({'Y':y, 'X1':x1, 'X2':x2, 'X3':x3})
# estimation
formula_omitted = 'Y ~ X1 + X2'
res_omitted = ols(formula_omitted, data=df_sample).fit()
res_omitted.params
```
The estimated coefficient on `X1` clearly differs from its true value.
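This is exactly what the bias formula above predicts: $\hat{\gamma}_1$ estimates $\beta_1+\beta_3\delta_1$, and in this simulation $\delta_1=m=2$. A quick numerical check (a small sketch reusing the variables from the cell above):
```
# Theoretical (biased) value of the coefficient on X1: beta1 + beta3*delta1, with delta1 = m
print('Predicted biased coefficient on X1:', b1 + b3*m)   # 1 + 1*2 = 3
print('Estimated coefficient on X1       :', res_omitted.params['X1'])
```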
### Simulation 2: distribution of the estimates
Let us check the distribution of the `OLS` estimator.
```
# true population parameter values
b0 = 1
b1 = 1
b2 = 1
b3 = 1
# number of simulation repetitions
N = 10_000
```
Define the simulation function.
* Arguments:
    * `n`: sample size
    * `m`: parameter capturing the correlation between `x1` and `x3`
* Return values: the arrays of estimates
```
@njit
def sim_omitted(n,m):
    # empty arrays to store the estimates
    b0hat_arr = np.zeros(N)
    b1hat_arr = np.zeros(N)
    b2hat_arr = np.zeros(N)
    # explanatory variables
    x1 = np.random.normal(loc=4, scale=1, size=n)
    x2 = np.random.uniform(1, 10, size=n)
    e = np.random.normal(loc=0, scale=1, size=n)
    x3 = 1 + m*x1 + e
    c = np.ones(n)  # constant term
    for i in range(N):  # loop N times
        # population
        u = np.random.normal(loc=0, scale=1, size=n)
        y = b0 + b1*x1 + b2*x2 + b3*x3 + u
        X = np.stack((c,x1,x2), axis=1)  # matrix of regressors
        bhat = np.linalg.inv((X.T)@X)@(X.T)@y  # OLS estimation
        b0hat_arr[i] = bhat[0]
        b1hat_arr[i] = bhat[1]
        b2hat_arr[i] = bhat[2]
    return b0hat_arr, b1hat_arr, b2hat_arr
```
Run the simulation.
```
b0hat, b1hat, b2hat = sim_omitted(1000,m=2.0)
```
Distribution of $\hat{\beta}_0$
```
plt.hist(b0hat, bins=30)
plt.axvline(x=b0, color='red')
pass
```
Distribution of $\hat{\beta}_1$
```
plt.hist(b1hat, bins=30)
plt.axvline(x=b1, color='red')
pass
```
Distribution of $\hat{\beta}_2$
```
plt.hist(b2hat, bins=30)
plt.axvline(x=b2, color='red')
pass
```
## Measurement Error
### Bias due to measurement error
Consider the following population regression equation:
$$y=\beta_0+\beta_1 x^* + \eta$$
Here the dependent variable $y$ is measured accurately, but assume the explanatory variable $x^*$ is measured according to:
$$x=x^*+e$$
* $x$: measured value
* $e$: measurement error
(Assumption)
* The measurement error $e$ is unrelated to the true value, i.e., $\text{Cov}(x^*,e)=0$.
(Result)
* When the following equation is estimated by OLS, $\hat{\beta}_1$ is neither unbiased nor consistent.
$$y=\beta_0+\beta_1 x + u,\qquad u=\eta-\beta_1 e$$
(Reason)
Assumption 4, $\text{Cov}(x,u)=0$, is not satisfied:
$$
\text{Cov}(x,u)=\text{E}[xu]
=\text{E}\left[(x^*+e)(\eta-\beta_1 e)\right]
=-\beta_1\cdot\text{E}(e^2)\neq 0
$$
### Simulation 1: computing the estimates
Run a one-shot simulation to confirm that the bias arises.
```
# sample size
n = 100
# population regression
b0 = 1.0
b1 = 1.0
x_pop = np.random.uniform(1,10,size=n)  # true explanatory variable in the population
u = np.random.normal(scale=1, size=n)  # population error term
y = b0 + b1*x_pop + u  # population regression
# measurement error
error_sd = 3  # standard deviation of the measurement error
x = x_pop + np.random.normal(scale=error_sd, size=n)  # measured with error
# OLS estimation
df_sim = pd.DataFrame({'Y':y, 'X':x})  # create the DataFrame
res_ols = ols('Y ~ X', data=df_sim).fit()  # OLS estimation
res_ols.params  # OLS estimates
```
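For reference, the size of this bias can also be quantified: under classical measurement error the OLS slope shrinks toward zero by the factor $\text{Var}(x^*)/\left(\text{Var}(x^*)+\text{Var}(e)\right)$ (attenuation bias). This formula is not derived in the text above, so treat the following as a supplementary check that only reuses quantities defined in the previous cell:
```
# Attenuation factor under classical measurement error
# x* ~ Uniform(1, 10)  =>  Var(x*) = (10 - 1)**2 / 12 = 6.75,  Var(e) = error_sd**2 = 9
var_x_star = (10 - 1)**2 / 12
shrink = var_x_star / (var_x_star + error_sd**2)
print('Predicted slope:', b1 * shrink)          # roughly 0.43
print('Estimated slope:', res_ols.params['X'])
```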
### Simulation 2: distribution of the estimates
Let us check the distribution of the `OLS` estimator.
```
# true parameters
b0 = 1.0
b1 = 1.0
# number of simulation repetitions
N = 100_000
@njit
def sim_measure(n):
    b0hat_arr = np.zeros(N)
    b1hat_arr = np.zeros(N)
    x_pop = np.random.uniform(1,10,size=n)  # true explanatory variable in the population
    c = np.ones(n)  # constant term
    # measurement error of the explanatory variable
    error_sd = 3  # standard deviation of the measurement error
    x = x_pop + np.random.normal(loc=0, scale=error_sd, size=n)  # measured with error
    for i in range(N):
        u = np.random.normal(loc=0, scale=1, size=n)  # population error term
        y = b0 + b1*x_pop + u  # population regression
        X = np.stack((c,x), axis=1)  # matrix of regressors
        bhat = np.linalg.inv((X.T)@X)@(X.T)@y  # OLS estimation
        b0hat_arr[i] = bhat[0]
        b1hat_arr[i] = bhat[1]
    return b0hat_arr, b1hat_arr
```
Run the simulation.
```
b0hat, b1hat = sim_measure(100)
```
Distribution of $\hat{\beta}_0$
```
plt.hist(b0hat,bins=30)
plt.axvline(x=b0,color='red')
pass
```
Distribution of $\hat{\beta}_1$
```
plt.hist(b1hat,bins=30)
plt.axvline(x=b1,color='red')
pass
```
## Simultaneous Equations
### Simultaneity bias
A simultaneous equation model is one in which several endogenous variables are determined jointly by several equations, for example through an equilibrium mechanism. As an example, consider a model of labor demand and supply. In equilibrium the quantity demanded ($L_d$) equals the quantity supplied ($L_s$), i.e. $L=L_d=L_s$, and both demand and supply depend on the equilibrium wage ($W$).
* Labor supply function
$$ L = s_0+s_1 W + s_2 X_s + u_s\qquad\qquad\qquad\text{(Eq. 1)}$$
    * $s_1>0$
    * $X_s=$ "other" determinants of supply (e.g., income level)
    * $u_s=$ supply error term
* Labor demand function
$$ W = d_0+d_1 L + d_2 X_d + u_d\qquad\qquad\qquad\text{(Eq. 2)}$$
    * $d_1<0$
    * $X_d=$ "other" determinants of demand (e.g., education level)
    * $u_d=$ demand error term
(Correlation assumptions)
* $\text{Cov}(X_s,u_s)=\text{Cov}(X_d,u_d)=\text{Cov}(u_s,u_d)=0$
The equations above are called **structural equations**. If they are estimated directly, the OLS estimators are biased. The reason is that $\text{Cov}(W,u_s)\neq 0$ in the supply function and $\text{Cov}(L,u_d)\neq 0$ in the demand function; in other words, Assumption 4 fails. This result is easy to show. Solving the two equations simultaneously for $L$ and $W$ gives:
$$L=\alpha_0+\alpha_1X_d+\alpha_2X_s+\frac{s_1u_d+u_s}{1-d_1s_1}\qquad\qquad\qquad\text{(Eq. 3)}$$
$$W=\beta_0+\beta_1X_d+\beta_2X_s+\frac{d_1u_s+u_d}{1-d_1s_1}\qquad\qquad\qquad\text{(Eq. 4)}$$
where $\alpha_i$ and $\beta_i$, $i=0,1,2$, are nonlinear functions of $d_i$ and $s_i$, $i=0,1,2$.
These solutions for $L$ and $W$ are called **reduced-form equations**, and they make the following points clear:
* (Eq. 3): $L$ depends on $u_d$, so $L$ and $u_d$ are correlated $\Rightarrow$ $\text{Cov}(L,u_d)\neq 0$
* (Eq. 4): $W$ depends on $u_s$, so $W$ and $u_s$ are correlated $\Rightarrow$ $\text{Cov}(W,u_s)\neq 0$
What happens if we estimate the reduced-form equations instead? Under the correlation assumptions, $\hat{\alpha}_i$ and $\hat{\beta}_i$, $i=0,1,2$, are unbiased and consistent. The problem is that infinitely many structural equations are consistent with the same reduced form, so $d_i$ and $s_i$, $i=0,1,2$, cannot be recovered from those estimates. To estimate those parameters we therefore need to estimate (Eq. 1) and (Eq. 2).
### Simulation 1: computing the estimates
(Purpose)
Run a one-shot simulation to confirm that directly estimating a structural equation produces biased estimates.
To simplify the simulation, assume $s_2=0$ in (Eq. 1). The parameters of (Eq. 3) and (Eq. 4) are then given by:
$$
\alpha_0\equiv\frac{s_0+s_1d_0}{1-s_1d_1},\quad
\alpha_1\equiv\frac{s_1d_2}{1-s_1d_1},\quad
\alpha_2=0
$$
$$
\beta_0\equiv\frac{d_0+d_1s_0}{1-s_1d_1},\quad
\beta_1\equiv d_2,\quad
\beta_2=0
$$
(Simulation plan)
* Set values for $s_0$, $s_1$ and $d_i$, $i=0,1,2$, and generate a sample that follows (Eq. 3) and (Eq. 4) (sample size $=n$).
* Use this sample to estimate (Eq. 2) and check the bias of the estimates.
Set the population parameters as follows.
```
s0 = 1.0
s1 = 0.5
d0 = 1.5
d1 = -1
d2 = 2.0
```
Sample size:
```
n = 10_000
```
Draw the explanatory variable and the error terms:
```
xd = np.random.uniform(1,10,size=n)  # explanatory variable
ud = np.random.normal(loc=0, scale=1, size=n)  # population error term (demand)
us = np.random.normal(loc=0, scale=2, size=n)  # population error term (supply)
```
The population labor supply curve:
```
l = (s0+s1*d0)/(1-s1*d1) + (s1*d2/(1-s1*d1))*xd + (s1*ud+us)/(1-s1*d1)
```
The population labor demand curve:
```
w = (d0+d1*s0)/(1-s1*d1) + d2*xd + (d1*us+ud)/(1-s1*d1)
```
Estimate the wage equation (the labor demand curve) by OLS:
```
df_simul = pd.DataFrame({'W':w, 'L':l, 'Xd':xd})  # create the DataFrame
res_ols = ols('W ~ L + Xd', data=df_simul).fit()  # OLS estimation
res_ols.params  # OLS estimates
```
Display the results more clearly.
```
print(f'True value of d0: {d0}\t\tEstimate of d0: {res_ols.params[0]}')
print(f'True value of d1: {d1}\t\tEstimate of d1: {res_ols.params[1]}')
print(f'True value of d2: {d2}\t\tEstimate of d2: {res_ols.params[2]}')
```
Even with a very large sample, the bias does not disappear.
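By contrast, the reduced-form equation (Eq. 4) satisfies Assumption 4, so estimating it by OLS is consistent, as claimed above. A quick check with the same simulated sample:
```
# Reduced form for W: regress W on Xd only
res_rf = ols('W ~ Xd', data=df_simul).fit()
print('Theoretical reduced-form parameters:', (d0 + d1*s0)/(1 - s1*d1), d2)  # intercept and slope of (Eq. 4)
print('Estimated reduced-form parameters  :', res_rf.params.values)
```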
### Simulation 2: distribution of the estimates
Let us check the distribution of the `OLS` estimator, using the same true parameter values as in Simulation 1.
```
s0 = 1.0
s1 = 0.5
d0 = 1.5
d1 = -1
d2 = 2.0
```
Number of simulation repetitions:
```
N = 100_000
```
Define the simulation function.
```
@njit
def sim_simul(n):
    b0hat_arr = np.zeros(N)
    b1hat_arr = np.zeros(N)
    b2hat_arr = np.zeros(N)
    xd = np.random.uniform(1, 10, size=n)  # explanatory variable
    c = np.ones(n)  # constant term
    for i in range(N):
        # population error term (demand)
        ud = np.random.normal(loc=0, scale=1, size=n)
        # population error term (supply)
        us = np.random.normal(loc=0, scale=2, size=n)
        # population labor supply curve
        l = (s0+s1*d0)/(1-s1*d1) + (s1*d2/(1-s1*d1))*xd + (s1*ud+us)/(1-s1*d1)
        # population labor demand curve
        w = (d0+d1*s0)/(1-s1*d1) + d2*xd + (d1*us+ud)/(1-s1*d1)
        # matrix of regressors
        X = np.stack((c,l,xd), axis=1)
        # estimate the wage equation (labor demand curve) by OLS
        bhat = np.linalg.inv((X.T)@X)@(X.T)@w  # OLS estimation
        b0hat_arr[i] = bhat[0]
        b1hat_arr[i] = bhat[1]
        b2hat_arr[i] = bhat[2]
    return b0hat_arr, b1hat_arr, b2hat_arr
```
Run the simulation.
```
b0hat, b1hat, b2hat = sim_simul(100)
```
Distribution of $\hat{\beta}_0$
```
plt.hist(b0hat,bins=30)
plt.axvline(x=d0,color='red')
pass
```
Distribution of $\hat{\beta}_1$
```
plt.hist(b1hat,bins=30)
plt.axvline(x=d1,color='red')
pass
```
Distribution of $\hat{\beta}_2$
```
plt.hist(b2hat,bins=30)
plt.axvline(x=d2,color='red')
pass
```
<a href="https://colab.research.google.com/github/AnilOsmanTur/Spatio-Temporal-Event-Prediction/blob/main/Prediction_with_HMM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Prediction with HMM
```
!pip install hmmlearn
import numpy as np
from hmmlearn import hmm
np.random.seed(42)  # to make the code reproducible
print('imports done')
```
## Data Loading and Splitting
```
data = np.load('data.npy')
n_sample = data.shape[0]
flat_data = data.reshape(n_sample, -1)
print('data shape after flattening', flat_data.shape)
n_train = int(0.8 * n_sample )
n_test = n_sample - n_train
train_data = flat_data[:n_train]
test_data = flat_data[n_train:]
print('Training data split', train_data.shape)
print('Testing data split', test_data.shape)
```
## Model Training
```
# model creation and training
model = hmm.GaussianHMM(n_components=32,
covariance_type="full",
n_iter=100,
verbose=True)
model.fit(train_data)
```
## Prediction with model
```
# Prediction function to predict next data point
def predict_next(model, known_data):
state_sequence = model.predict(known_data)
prob_next_step = model.transmat_[state_sequence[-1], :]
t1 = model._generate_sample_from_state(np.argmax(prob_next_step))
    t1 = 1/(1 + np.exp(-t1))  # sigmoid to get probability-score-like results
return t1
# Distance functions to measure distance between target and prediction
def hamming_dist(a, b):
return np.count_nonzero(a!=b)
def euclidian_dist(a, b):
return np.linalg.norm(a-b)
t1 = (predict_next(model, train_data[:5]) > 0.5).astype(float)
t1
label = train_data[5]
label
euclidian_dist(t1, label)
hamming_dist(t1, label)
```
## Test Case Generation
```
test_idx = np.random.randint(5, n_test, size=100)
start_idx = test_idx-5
test_cases = [test_data[start:end] for start, end in zip(start_idx,test_idx)]
test_labels = test_data[test_idx]
print(len(test_cases))
print(test_labels.shape)
dists = []
for label, a_case in zip(test_labels, test_cases):
    t1 = (predict_next(model, a_case) > 0.5).astype(float)
e_dist = euclidian_dist(t1, label)
h_dist = hamming_dist(t1, label)
dists.append([e_dist, h_dist])
print('prediction done')
%matplotlib inline
import matplotlib.pyplot as plt
dists = np.array(dists)
print('Average distances:')
d_means = np.mean(dists, axis=0)
print('Euclidian:', d_means[0])
print('Hamming :', d_means[1])
plt.plot(dists)
```
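For context, it can be useful to compare these errors against a naive persistence baseline that simply predicts the last observed frame. This is an optional sketch that only reuses the `test_cases`, `test_labels` and distance functions defined above:
```
# Naive persistence baseline: predict that the next frame equals the last known frame
baseline_dists = []
for label, a_case in zip(test_labels, test_cases):
    t1 = a_case[-1]  # last observed frame used as the "prediction"
    baseline_dists.append([euclidian_dist(t1, label), hamming_dist(t1, label)])
baseline_means = np.mean(np.array(baseline_dists), axis=0)
print('Baseline Euclidian:', baseline_means[0])
print('Baseline Hamming  :', baseline_means[1])
```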
## Work
1. Compare the performance of the SGD optimizer with different momentum values, and with and without Nesterov momentum.
```
import os
import keras
# Disable GPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
train, test = keras.datasets.cifar10.load_data()
## 資料前處理
def preproc_x(x, flatten=True):
x = x / 255.
if flatten:
x = x.reshape((len(x), -1))
return x
def preproc_y(y, num_classes=10):
if y.shape[-1] == 1:
y = keras.utils.to_categorical(y, num_classes)
return y
x_train, y_train = train
x_test, y_test = test
# Preproc the inputs
x_train = preproc_x(x_train)
x_test = preproc_x(x_test)
# Preproc the outputs
y_train = preproc_y(y_train)
y_test = preproc_y(y_test)
def build_mlp(input_shape, output_units=10, num_neurons=[512, 256, 128, 64, 32]):
input_layer = keras.layers.Input(input_shape)
for i, n_units in enumerate(num_neurons):
if i == 0:
x = keras.layers.Dense(units=n_units, activation="relu", name="hidden_layer"+str(i+1))(input_layer)
else:
x = keras.layers.Dense(units=n_units, activation="relu", name="hidden_layer"+str(i+1))(x)
out = keras.layers.Dense(units=output_units, activation="softmax", name="output")(x)
model = keras.models.Model(inputs=[input_layer], outputs=[out])
return model
## Hyperparameter settings
"""
Set your hyper-parameters
"""
LEARNING_RATE = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
EPOCHS = 5
BATCH_SIZE = 256
MOMENTUM = 0.1
results = {}
"""
Build the experiment loop
"""
for lr in LEARNING_RATE:
    keras.backend.clear_session() # clear the old graph
print("Experiment with LR = %.6f" % (lr))
model = build_mlp(input_shape=x_train.shape[1:])
model.summary()
optimizer = keras.optimizers.SGD(lr=lr, nesterov=True, momentum=MOMENTUM)
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
model.fit(x_train, y_train,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
shuffle=True)
# Collect results
train_loss = model.history.history["loss"]
valid_loss = model.history.history["val_loss"]
train_acc = model.history.history["acc"]
valid_acc = model.history.history["val_acc"]
exp_name_tag = "exp-lr-%s" % str(lr)
results[exp_name_tag] = {'train-loss': train_loss,
'valid-loss': valid_loss,
'train-acc': train_acc,
'valid-acc': valid_acc}
import matplotlib.pyplot as plt
%matplotlib inline
color_bar = ["r", "g", "b", "y", "m", "k"]
plt.figure(figsize=(8,6))
for i, cond in enumerate(results.keys()):
plt.plot(range(len(results[cond]['train-loss'])),results[cond]['train-loss'], '-', label=cond, color=color_bar[i])
plt.plot(range(len(results[cond]['valid-loss'])),results[cond]['valid-loss'], '--', label=cond, color=color_bar[i])
plt.title("Loss")
plt.legend()
plt.show()
plt.figure(figsize=(8,6))
for i, cond in enumerate(results.keys()):
plt.plot(range(len(results[cond]['train-acc'])),results[cond]['train-acc'], '-', label=cond, color=color_bar[i])
plt.plot(range(len(results[cond]['valid-acc'])),results[cond]['valid-acc'], '--', label=cond, color=color_bar[i])
plt.title("Accuracy")
plt.legend()
plt.show()
results = {}
"""
Build the experiment loop
"""
for lr in LEARNING_RATE:
    keras.backend.clear_session() # clear the old graph
print("Experiment with LR = %.6f" % (lr))
model = build_mlp(input_shape=x_train.shape[1:])
model.summary()
optimizer = keras.optimizers.SGD(lr=lr, nesterov=False, momentum=MOMENTUM)
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
model.fit(x_train, y_train,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
shuffle=True)
# Collect results
train_loss = model.history.history["loss"]
valid_loss = model.history.history["val_loss"]
train_acc = model.history.history["acc"]
valid_acc = model.history.history["val_acc"]
exp_name_tag = "exp-lr-%s" % str(lr)
results[exp_name_tag] = {'train-loss': train_loss,
'valid-loss': valid_loss,
'train-acc': train_acc,
'valid-acc': valid_acc}
import matplotlib.pyplot as plt
%matplotlib inline
color_bar = ["r", "g", "b", "y", "m", "k"]
plt.figure(figsize=(8,6))
for i, cond in enumerate(results.keys()):
plt.plot(range(len(results[cond]['train-loss'])),results[cond]['train-loss'], '-', label=cond, color=color_bar[i])
plt.plot(range(len(results[cond]['valid-loss'])),results[cond]['valid-loss'], '--', label=cond, color=color_bar[i])
plt.title("Loss")
plt.legend()
plt.show()
plt.figure(figsize=(8,6))
for i, cond in enumerate(results.keys()):
plt.plot(range(len(results[cond]['train-acc'])),results[cond]['train-acc'], '-', label=cond, color=color_bar[i])
plt.plot(range(len(results[cond]['valid-acc'])),results[cond]['valid-acc'], '--', label=cond, color=color_bar[i])
plt.title("Accuracy")
plt.legend()
plt.show()
```
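The loops above sweep over learning rates with a fixed `MOMENTUM`. To address the exercise prompt directly (different momentum values, with and without Nesterov), one possible sketch, reusing `build_mlp`, the preprocessed data and the hyperparameters defined above, is:
```
"""
Sketch: sweep momentum and the nesterov flag instead of the learning rate
"""
MOMENTUMS = [0.0, 0.5, 0.9, 0.95]
LR = 1e-3
results_momentum = {}
for momentum in MOMENTUMS:
    for nesterov in [False, True]:
        keras.backend.clear_session()  # clear the old graph
        model = build_mlp(input_shape=x_train.shape[1:])
        optimizer = keras.optimizers.SGD(lr=LR, momentum=momentum, nesterov=nesterov)
        model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
        model.fit(x_train, y_train,
                  epochs=EPOCHS,
                  batch_size=BATCH_SIZE,
                  validation_data=(x_test, y_test),
                  shuffle=True)
        tag = "momentum-%s-nesterov-%s" % (momentum, nesterov)
        results_momentum[tag] = {'train-loss': model.history.history["loss"],
                                 'valid-loss': model.history.history["val_loss"],
                                 'train-acc': model.history.history["acc"],
                                 'valid-acc': model.history.history["val_acc"]}
```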
# Seasonality in time series data
Consider the problem of modeling time series data with multiple seasonal components with different periodicities. Let us take the time series $y_t$ and decompose it explicitly to have a level component and two seasonal components.
$$
y_t = \mu_t + \gamma^{(1)}_t + \gamma^{(2)}_t
$$
where $\mu_t$ represents the trend or level, $\gamma^{(1)}_t$ represents a seasonal component with a relatively short period, and $\gamma^{(2)}_t$ represents another seasonal component of longer period. We will have a fixed intercept term for our level and consider both $\gamma^{(1)}_t$ and $\gamma^{(2)}_t$ to be stochastic so that the seasonal patterns can vary over time.
In this notebook, we will generate synthetic data conforming to this model and showcase modeling of the seasonal terms in a few different ways under the unobserved components modeling framework.
```
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
```
### Synthetic data creation
We will create data with multiple seasonal patterns by following equations (3.7) and (3.8) in Durbin and Koopman (2012). We will simulate 300 periods and two seasonal terms parametrized in the frequency domain, with periods 10 and 100 and with 3 and 2 harmonics, respectively. The variances of their stochastic parts are 4 and 9, respectively.
```
# First we'll simulate the synthetic data
def simulate_seasonal_term(periodicity, total_cycles, noise_std=1.,
harmonics=None):
duration = periodicity * total_cycles
assert duration == int(duration)
duration = int(duration)
harmonics = harmonics if harmonics else int(np.floor(periodicity / 2))
lambda_p = 2 * np.pi / float(periodicity)
gamma_jt = noise_std * np.random.randn((harmonics))
gamma_star_jt = noise_std * np.random.randn((harmonics))
total_timesteps = 100 * duration # Pad for burn in
series = np.zeros(total_timesteps)
for t in range(total_timesteps):
gamma_jtp1 = np.zeros_like(gamma_jt)
gamma_star_jtp1 = np.zeros_like(gamma_star_jt)
for j in range(1, harmonics + 1):
cos_j = np.cos(lambda_p * j)
sin_j = np.sin(lambda_p * j)
gamma_jtp1[j - 1] = (gamma_jt[j - 1] * cos_j
+ gamma_star_jt[j - 1] * sin_j
+ noise_std * np.random.randn())
gamma_star_jtp1[j - 1] = (- gamma_jt[j - 1] * sin_j
+ gamma_star_jt[j - 1] * cos_j
+ noise_std * np.random.randn())
series[t] = np.sum(gamma_jtp1)
gamma_jt = gamma_jtp1
gamma_star_jt = gamma_star_jtp1
wanted_series = series[-duration:] # Discard burn in
return wanted_series
duration = 100 * 3
periodicities = [10, 100]
num_harmonics = [3, 2]
std = np.array([2, 3])
np.random.seed(8678309)
terms = []
for ix, _ in enumerate(periodicities):
s = simulate_seasonal_term(
periodicities[ix],
duration / periodicities[ix],
harmonics=num_harmonics[ix],
noise_std=std[ix])
terms.append(s)
terms.append(np.ones_like(terms[0]) * 10.)
series = pd.Series(np.sum(terms, axis=0))
df = pd.DataFrame(data={'total': series,
'10(3)': terms[0],
'100(2)': terms[1],
'level':terms[2]})
h1, = plt.plot(df['total'])
h2, = plt.plot(df['10(3)'])
h3, = plt.plot(df['100(2)'])
h4, = plt.plot(df['level'])
plt.legend(['total','10(3)','100(2)', 'level'])
plt.show()
```
### Unobserved components (frequency domain modeling)
The first method is an unobserved components model, where the trend is modeled as a fixed intercept and the seasonal components are modeled using trigonometric functions with primary periodicities of 10 and 100 and with 3 and 2 harmonics, respectively. Note that this is the correct generating model. The process for the time series can be written as:
$$
\begin{align}
y_t & = \mu_t + \gamma^{(1)}_t + \gamma^{(2)}_t + \epsilon_t\\
\mu_{t+1} & = \mu_t \\
\gamma^{(1)}_{t} &= \sum_{j=1}^3 \gamma^{(1)}_{j, t} \\
\gamma^{(2)}_{t} &= \sum_{j=1}^2 \gamma^{(2)}_{j, t}\\
\gamma^{(1)}_{j, t+1} &= \gamma^{(1)}_{j, t}\cos(\lambda_j) + \gamma^{*, (1)}_{j, t}\sin(\lambda_j) + \omega^{(1)}_{j,t}, ~j = 1, 2, 3\\
\gamma^{*, (1)}_{j, t+1} &= -\gamma^{(1)}_{j, t}\sin(\lambda_j) + \gamma^{*, (1)}_{j, t}\cos(\lambda_j) + \omega^{*, (1)}_{j, t}, ~j = 1, 2, 3\\
\gamma^{(2)}_{j, t+1} &= \gamma^{(2)}_{j, t}\cos(\lambda_j) + \gamma^{*, (2)}_{j, t}\sin(\lambda_j) + \omega^{(2)}_{j,t}, ~j = 1, 2\\
\gamma^{*, (2)}_{j, t+1} &= -\gamma^{(2)}_{j, t}\sin(\lambda_j) + \gamma^{*, (2)}_{j, t}\cos(\lambda_j) + \omega^{*, (2)}_{j, t}, ~j = 1, 2\\
\end{align}
$$
where $\epsilon_t$ is white noise, $\omega^{(1)}_{j,t}$ are i.i.d. $N(0, \sigma^2_1)$, and $\omega^{(2)}_{j,t}$ are i.i.d. $N(0, \sigma^2_2)$, with $\sigma_1 = 2$ and $\sigma_2 = 3$.
```
model = sm.tsa.UnobservedComponents(series.values,
level='fixed intercept',
freq_seasonal=[{'period': 10,
'harmonics': 3},
{'period': 100,
'harmonics': 2}])
res_f = model.fit(disp=False)
print(res_f.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_f.smoother_results.smoothed_state[0,-1:][0]))
res_f.plot_components()
plt.show()
model.ssm.transition[:, :, 0]
```
Observe that the fitted variances are close to the true variances of 4 and 9, and the individual seasonal components track the true seasonal components well. The smoothed level term is close to the true level of 10. Finally, our diagnostics look solid: the test statistics are small enough that we fail to reject any of our three tests.
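As a quick check on these claims, we can print the fitted parameters and rerun the residual diagnostics that the summary table reports. This is a minimal sketch using standard statsmodels results methods; the exact labels in `res_f.params` depend on the statsmodels version.
```
# Estimated variances (compare against the true values of 4 and 9)
print(res_f.params)
# The three residual diagnostics from the summary table
print(res_f.test_serial_correlation(method='ljungbox'))  # Ljung-Box (serial correlation)
print(res_f.test_heteroskedasticity(method='breakvar'))  # heteroskedasticity
print(res_f.test_normality(method='jarquebera'))         # Jarque-Bera (normality)
```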
### Unobserved components (mixed time and frequency domain modeling)
The second method is an unobserved components model, where the trend is modeled as a fixed intercept, the shorter seasonal component is modeled using 10 constants summing to 0 (a time-domain specification), and the longer seasonal component is modeled using trigonometric functions with a primary periodicity of 100 and 2 harmonics. Note that this is not the generating model, as it presupposes more state errors for the shorter seasonal component than there are in reality. The process for the time series can be written as:
$$
\begin{align}
y_t & = \mu_t + \gamma^{(1)}_t + \gamma^{(2)}_t + \epsilon_t\\
\mu_{t+1} & = \mu_t \\
\gamma^{(1)}_{t + 1} &= - \sum_{j=1}^9 \gamma^{(1)}_{t + 1 - j} + \omega^{(1)}_t\\
\gamma^{(2)}_{j, t+1} &= \gamma^{(2)}_{j, t}\cos(\lambda_j) + \gamma^{*, (2)}_{j, t}\sin(\lambda_j) + \omega^{(2)}_{j,t}, ~j = 1, 2\\
\gamma^{*, (2)}_{j, t+1} &= -\gamma^{(2)}_{j, t}\sin(\lambda_j) + \gamma^{*, (2)}_{j, t}\cos(\lambda_j) + \omega^{*, (2)}_{j, t}, ~j = 1, 2\\
\end{align}
$$
where $\epsilon_t$ is white noise, $\omega^{(1)}_{t}$ are i.i.d. $N(0, \sigma^2_1)$, and $\omega^{(2)}_{j,t}$ are i.i.d. $N(0, \sigma^2_2)$.
```
model = sm.tsa.UnobservedComponents(series,
level='fixed intercept',
seasonal=10,
freq_seasonal=[{'period': 100,
'harmonics': 2}])
res_tf = model.fit(disp=False)
print(res_tf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_tf.smoother_results.smoothed_state[0,-1:][0]))
res_tf.plot_components()
plt.show()
```
The plotted components look good. However, the estimated variance of the second seasonal term is inflated relative to its true value. Additionally, the Ljung-Box statistic is rejected, indicating that some autocorrelation may remain after accounting for our components.
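To see this rejection directly, we can pull the Ljung-Box statistics and p-values out of the fitted results. This small sketch assumes the `(k_endog, 2, lags)` output layout of `test_serial_correlation`.
```
# Ljung-Box test on the standardized residuals of the mixed-domain model
lb = res_tf.test_serial_correlation(method='ljungbox')
lb_stat, lb_pvalue = lb[0]  # first (and only) endogenous series
print(lb_pvalue)            # small p-values indicate remaining autocorrelation
```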
### Unobserved components (lazy frequency domain modeling)
The third method is an unobserved components model with a fixed intercept and one seasonal component, which is modeled using trigonometric functions with primary periodicity 100 and 50 harmonics. Note that this is not the generating model, as it presupposes that there are more harmonics than in reality. Because the variances are tied together, we are not able to drive the estimated variance of the non-existent harmonics to 0. What is lazy about this model specification is that we have not bothered to specify the two different seasonal components and have instead chosen to model them using a single component with enough harmonics to cover both. We will not be able to capture any differences in variances between the two true components. The process for the time series can be written as:
$$
\begin{align}
y_t & = \mu_t + \gamma^{(1)}_t + \epsilon_t\\
\mu_{t+1} &= \mu_t\\
\gamma^{(1)}_{t} &= \sum_{j=1}^{50}\gamma^{(1)}_{j, t}\\
\gamma^{(1)}_{j, t+1} &= \gamma^{(1)}_{j, t}\cos(\lambda_j) + \gamma^{*, (1)}_{j, t}\sin(\lambda_j) + \omega^{(1)}_{j,t}, ~j = 1, 2, \dots, 50\\
\gamma^{*, (1)}_{j, t+1} &= -\gamma^{(1)}_{j, t}\sin(\lambda_j) + \gamma^{*, (1)}_{j, t}\cos(\lambda_j) + \omega^{*, (1)}_{j, t}, ~j = 1, 2, \dots, 50\\
\end{align}
$$
where $\epsilon_t$ is white noise and $\omega^{(1)}_{j,t}$ are i.i.d. $N(0, \sigma^2_1)$.
```
model = sm.tsa.UnobservedComponents(series,
level='fixed intercept',
freq_seasonal=[{'period': 100}])
res_lf = model.fit(disp=False)
print(res_lf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lf.smoother_results.smoothed_state[0,-1:][0]))
res_lf.plot_components()
plt.show()
```
Note that one of our diagnostic tests would be rejected at the .05 level.
### Unobserved components (lazy time domain seasonal modeling)
The fourth method is an unobserved components model with a fixed intercept and a single seasonal component modeled using a time-domain seasonal model of 100 constants. The process for the time series can be written as:
$$
\begin{align}
y_t & =\mu_t + \gamma^{(1)}_t + \epsilon_t\\
\mu_{t+1} &= \mu_{t} \\
\gamma^{(1)}_{t + 1} &= - \sum_{j=1}^{99} \gamma^{(1)}_{t + 1 - j} + \omega^{(1)}_t\\
\end{align}
$$
where $\epsilon_t$ is white noise, $\omega^{(1)}_{t}$ are i.i.d. $N(0, \sigma^2_1)$.
```
model = sm.tsa.UnobservedComponents(series,
level='fixed intercept',
seasonal=100)
res_lt = model.fit(disp=False)
print(res_lt.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lt.smoother_results.smoothed_state[0,-1:][0]))
res_lt.plot_components()
plt.show()
```
The seasonal component itself looks good; it captures the primary signal. However, the estimated variance of the seasonal term is very high ($>10^5$), leading to substantial uncertainty in our one-step-ahead predictions and slow responsiveness to new data, as evidenced by the large gaps between the one-step-ahead predictions and the observations. Finally, all three of our diagnostic tests were rejected.
### Comparison of filtered estimates
The plots below show that explicitly modeling the individual components results in the filtered state being close to the true state within roughly half a period. The lazy models take longer (almost a full period) to achieve the same on the combined true state.
```
# Assign better names for our seasonal terms
true_seasonal_10_3 = terms[0]
true_seasonal_100_2 = terms[1]
true_sum = true_seasonal_10_3 + true_seasonal_100_2
time_s = np.s_[:50] # After this they basically agree
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
idx = np.asarray(series.index)
h1, = ax1.plot(idx[time_s], res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h2, = ax1.plot(idx[time_s], res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h3, = ax1.plot(idx[time_s], true_seasonal_10_3[time_s], label='True Seasonal 10(3)')
plt.legend([h1, h2, h3], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 10(3) component')
plt.show()
time_s = np.s_[:50] # After this they basically agree
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
h21, = ax2.plot(idx[time_s], res_f.freq_seasonal[1].filtered[time_s], label='Double Freq. Seas')
h22, = ax2.plot(idx[time_s], res_tf.freq_seasonal[0].filtered[time_s], label='Mixed Domain Seas')
h23, = ax2.plot(idx[time_s], true_seasonal_100_2[time_s], label='True Seasonal 100(2)')
plt.legend([h21, h22, h23], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 100(2) component')
plt.show()
time_s = np.s_[:100]
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
h31, = ax3.plot(idx[time_s], res_f.freq_seasonal[1].filtered[time_s] + res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h32, = ax3.plot(idx[time_s], res_tf.freq_seasonal[0].filtered[time_s] + res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h33, = ax3.plot(idx[time_s], true_sum[time_s], label='True Seasonal Sum')
h34, = ax3.plot(idx[time_s], res_lf.freq_seasonal[0].filtered[time_s], label='Lazy Freq. Seas')
h35, = ax3.plot(idx[time_s], res_lt.seasonal.filtered[time_s], label='Lazy Time Seas')
plt.legend([h31, h32, h33, h34, h35], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth', 'Lazy Freq. Seas', 'Lazy Time Seas'], loc=1)
plt.title('Seasonal components combined')
plt.show()
```
##### Conclusions
In this notebook, we simulated a time series with two seasonal components of different periods. We modeled them using structural time series models with (a) two frequency-domain components with the correct periods and numbers of harmonics, (b) a time-domain seasonal component for the shorter-period term and a frequency-domain term with the correct period and number of harmonics for the longer-period term, (c) a single frequency-domain term with the longer period and a full set of harmonics, and (d) a single time-domain term with the longer period. We saw a variety of diagnostic results, with only the correct generating model, (a), failing to reject any of the tests. Thus, more flexible seasonal modeling that allows multiple components with specifiable harmonics can be a useful tool for time series modeling. Finally, we can represent seasonal components with fewer total states in this way, allowing the user to make the bias-variance trade-off themselves instead of being forced to choose "lazy" models, which use a large number of states and incur additional variance as a result.
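To make the point about the number of states concrete, one way is to compare the dimension of the state vector used by each specification; this short sketch relies on the `k_states` attribute of the underlying statespace model.
```
# State dimension of each specification (fewer states means fewer quantities to filter)
for name, res in [('double freq', res_f), ('mixed domain', res_tf),
                  ('lazy freq', res_lf), ('lazy time', res_lt)]:
    print(name, res.model.k_states)
```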
```
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 22:05:46 2019
@author: rajgu
"""
import pulp
import pandas as pd
import numpy as np
import os
startTime = pd.datetime.now()
os.chdir(r"D:\My Personal Documents\Learnings\Data Science\Quantam Stride")
import importlib
import Space_optimisation_config
importlib.reload(Space_optimisation_config)
#print(path_in)
# Instantiate our problem class
optimisation_period_list = Space_optimisation_config.optimisation_period
path_in = Space_optimisation_config.path_in
path_out = Space_optimisation_config.path_out
#data = pd.read_excel(path_in +'Data template - SS_clean_temp.xlsx', sheet_name='Sheet1')
data = pd.read_excel(path_in +'test_data.xlsx')
data.columns = [col.upper().strip().replace(' ', '_').replace('/', '_') for col in data.columns]
data = data.set_index('BUILDING_NAME')
data['LEASE_TERMINATION_DATE'] = pd.to_datetime(data['LEASE_TERMINATION_DATE'], format = '%Y-%m-%d')
data['EARLY_TERMINATION_DATE'] = pd.to_datetime(data['EARLY_TERMINATION_DATE'], errors='coerce')
data.loc[data['EARLY_TERMINATION_DATE'].isnull(), 'EARLY_TERMINATION_DATE'] = \
data.loc[data['EARLY_TERMINATION_DATE'].isnull(), 'LEASE_TERMINATION_DATE']
data['EARLY_TERMINATION_DATE'] = data['EARLY_TERMINATION_DATE'].apply(lambda x: pd.datetime.now().date() if x.year == 2099 else x)
data.loc[data['LEASE_TYPE'].isin(['Owned', 'OWNED']), 'EARLY_TERMINATION_DATE' ] = pd.datetime.now().date()
data.loc[~(data['LEASE_TYPE'].isin(['Owned', 'OWNED'])) & data['EARLY_TERMINATION_DATE'].isnull() & \
data['LEASE_TERMINATION_DATE'].isnull(), 'EARLY_TERMINATION_DATE' ] = pd.datetime.now().date()
data.loc[data['LEASE_TYPE'].isin(['GROSS', 'Gross']), 'OPEX_SQFT' ] = 0
data['TERMINATION_PENALTY_MNTH'] = 3
data.loc[data['LEASE_TYPE'].isin(['Owned', 'OWNED', 'owned']), 'TERMINATION_PENALTY_MNTH' ] = 0
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
data['PENALTY_PERIOD'] = data['EARLY_TERMINATION_DATE'].apply(lambda x: diff_month(x, pd.datetime.now().date()))
data.loc[data['PENALTY_PERIOD'] < 0, 'PENALTY_PERIOD'] = 0
data['PENALTY_PERIOD'] = data['PENALTY_PERIOD'] + data['TERMINATION_PENALTY_MNTH']
period = 60
for i in range(1, period+1):
data['PERIOD_'+str(i)] = 0
for i in data.index:
if data.loc[i,'PENALTY_PERIOD'] > 0:
period_end = data.loc[i,'PENALTY_PERIOD'] if data.loc[i,'PENALTY_PERIOD'] < 61 else 60
data.loc[i, 'PERIOD_1': 'PERIOD_'+str(period_end) ] = data.loc[i, 'ANNUAL_RENTAL_COST']/12
output = []
output1 = []
for i in range(len(optimisation_period_list)):
optimisation_period = optimisation_period_list[i]
building_status = pulp.LpVariable.dicts("building_status",
(i for i in data.index),
cat='Binary')
seats = pulp.LpVariable.dicts("seats",
(i for i in data.index),
lowBound=0,
cat='Integer')
lease_breakup_status = pulp.LpVariable.dicts("lease_breakup_status",
(i for i in data.index),
cat='Binary')
cost_of_move_in = pulp.LpVariable.dicts("cost_of_move_in",
(i for i in data.index),
lowBound=0,
cat='Integer')
cost_of_move_out = pulp.LpVariable.dicts("cost_of_move_out",
(i for i in data.index),
lowBound=0,
cat='Integer')
model = pulp.LpProblem("Cost minimising Lease problem", pulp.LpMinimize)
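# Objective: minimize rent and opex over the optimisation period for buildings that are kept,
# plus the lease-break penalty stream for buildings that are vacated and any move-in/move-out costs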
model += pulp.lpSum(
[building_status[build] * (data.loc[build , 'ANNUAL_RENTAL_COST']/12)* \
optimisation_period for build in data.index]
+ [lease_breakup_status[build] * (data.loc[build , \
'PERIOD_1':'PERIOD_'+str(optimisation_period)].sum()) for build in data.index]
+ [cost_of_move_in[build] for build in data.index ]
+ [cost_of_move_out[build] for build in data.index]
+ [building_status[build] * (data.loc[build , 'OPEX_SQFT']/12)* \
optimisation_period for build in data.index]
)
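# Constraints: seats can only be placed in buildings that are kept (capped at capacity),
# each building is either kept or its lease is broken, move-in/out costs are bounded below by
# the seat change times the unit cost, and total seats must equal the current assigned occupancy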
for build in data.index:
max_seat = data.loc[build, 'SPACE_OCCUPANCY']
model += seats[build] <= max_seat * building_status[build]
model += building_status[build] + lease_breakup_status[build] ==1
model += cost_of_move_in[build] >= (seats[build] - data.loc[build, 'ASSIGNED_OCCUPANCY']) \
* data.loc[build, 'AVERAGE_MOVEIN_COST']
model += cost_of_move_out[build] >= (data.loc[build, 'ASSIGNED_OCCUPANCY'] - seats[build]) \
* data.loc[build, 'AVERAGE_MOVEOUT_COST']
model += pulp.lpSum(
[seats[build] for build in data.index]) == pulp.lpSum(
[data.loc[build,'ASSIGNED_OCCUPANCY'] for build in data.index])
model.solve()
pulp.LpStatus[model.status]
# output = []
for build in data.index:
var_output = {
'Report Ran Date': pd.datetime.now().date().strftime('%d-%m-%Y'),
'Optimisation Period': str(optimisation_period) + ' months',
'Building': build,
'Building_status': building_status[build].varValue,
'Current number of People': data.loc[build, 'ASSIGNED_OCCUPANCY'],
'Proposed number of People': seats[build].varValue,
'Cost_of_move_in': cost_of_move_in[build].varValue,
'Cost_of_move_out': cost_of_move_out[build].varValue,
'First Year Lease Cost': building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'],
'First Year OPEX Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'],
'First Year Lease break down Cost': lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_1':'PERIOD_12'].sum()),
'Total First Year Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] +\
building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] + \
lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_1':'PERIOD_12'].sum()) +\
cost_of_move_in[build].varValue + cost_of_move_out[build].varValue,
'Second Year Lease Cost': building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'],
'Second Year OPEX Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'],
'Second Year Lease break down Cost': lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_13':'PERIOD_24'].sum()),
'Total Second Year Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] +\
building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] + \
lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_13':'PERIOD_24'].sum()),
'Third Year Lease Cost': building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'],
'Third Year OPEX Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'],
'Third Year Lease break down Cost': lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_25':'PERIOD_36'].sum()),
'Total Third Year Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] +\
building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] + \
lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_25':'PERIOD_36'].sum()),
'Fourth Year Lease Cost': building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'],
'Fourth Year OPEX Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'],
'Fourth Year Lease break down Cost': lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_37':'PERIOD_48'].sum()),
'Total Fourth Year Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] +\
building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] + \
lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_37':'PERIOD_48'].sum()),
'Fifth Year Lease Cost': building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'],
'Fifth Year OPEX Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'],
'Fifth Year Lease break down Cost': lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_49':'PERIOD_60'].sum()),
'Total Fifth Year Cost': building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] +\
building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] + \
lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_49':'PERIOD_60'].sum())
}
output.append(var_output)
yr_actual_cost = sum([data.loc[build , 'ANNUAL_RENTAL_COST'] for build in data.index]) + \
sum([data.loc[build , 'OPEX_SQFT'] for build in data.index])
first_yr_proposed_cost = sum([building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] for build in data.index])+\
sum([building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] for build in data.index])+ \
sum([lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_1':'PERIOD_12'].sum()) +\
cost_of_move_in[build].varValue + cost_of_move_out[build].varValue for build in data.index])
second_yr_proposed_cost = sum([building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] for build in data.index])+\
sum([building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] for build in data.index])+ \
sum([lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_13':'PERIOD_24'].sum()) for build in data.index])
third_yr_proposed_cost = sum([building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] for build in data.index])+\
sum([building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] for build in data.index])+ \
sum([lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_25':'PERIOD_36'].sum()) for build in data.index])
fourth_yr_proposed_cost = sum([building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] for build in data.index])+\
sum([building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] for build in data.index])+ \
sum([lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_37':'PERIOD_48'].sum()) for build in data.index])
fifth_yr_proposed_cost = sum([building_status[build].varValue * data.loc[build , 'OPEX_SQFT'] for build in data.index])+\
sum([building_status[build].varValue * data.loc[build , 'ANNUAL_RENTAL_COST'] for build in data.index])+ \
sum([lease_breakup_status[build].varValue * (data.loc[build , \
'PERIOD_49':'PERIOD_60'].sum()) for build in data.index])
total_proposed_cost = first_yr_proposed_cost + second_yr_proposed_cost + third_yr_proposed_cost + \
fourth_yr_proposed_cost + fifth_yr_proposed_cost
proposed_cost = [first_yr_proposed_cost , second_yr_proposed_cost , third_yr_proposed_cost , \
fourth_yr_proposed_cost , fifth_yr_proposed_cost, total_proposed_cost ]
current_cost = [yr_actual_cost, yr_actual_cost, yr_actual_cost, yr_actual_cost, yr_actual_cost, yr_actual_cost*5]
period_yr = ['First Year', 'Second Year', 'Third Year', 'Fourth Year', 'Fifth Year', 'Total']
for i,p in enumerate(period_yr):
var_output = {
'Report Ran Date': pd.datetime.now().date().strftime('%d-%m-%Y'),
'Optimisation Period': str(optimisation_period) + ' months',
'Year': p,
'Current Cost': current_cost[i],
'Proposed Cost': proposed_cost[i],
'Saving': current_cost[i] - proposed_cost[i]
}
output1.append(var_output)
output_df = pd.DataFrame.from_records(output)
report_columns = ['Report Ran Date','Optimisation Period','Building', 'Building_status',\
'Current number of People', 'Proposed number of People',\
'Cost_of_move_in', 'Cost_of_move_out', \
'First Year Lease Cost', 'First Year OPEX Cost','First Year Lease break down Cost', \
'Total First Year Cost',\
'Second Year Lease Cost', 'Second Year OPEX Cost','Second Year Lease break down Cost', \
'Total Second Year Cost',\
'Third Year Lease Cost', 'Third Year OPEX Cost','Third Year Lease break down Cost', \
'Total Third Year Cost',\
'Fourth Year Lease Cost', 'Fourth Year OPEX Cost','Fourth Year Lease break down Cost', \
'Total Fourth Year Cost',\
'Fifth Year Lease Cost', 'Fifth Year OPEX Cost','Fifth Year Lease break down Cost', \
'Total Fifth Year Cost']
output_df.to_csv(path_out+'Test Data File1_details.csv', columns= report_columns, index=False)
output1_df = pd.DataFrame.from_records(output1)
report1_columns = ['Report Ran Date','Optimisation Period','Year', 'Current Cost',\
'Proposed Cost', 'Saving']
output1_df.to_csv(path_out+'Test Data File1_summary.csv', columns= report1_columns, index=False)
endTime = pd.datetime.now()
print("Data Size " + str(data.shape))
print("Execution Start Time: " + str(startTime))
print("Execution End Time: " + str(endTime))
data.columns
building_status[build]
building_status[build]
build
data.to_csv("test.csv")
```
```
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import re
import time
import collections
import os
def build_dataset(words, n_words, atleast=1):
count = [['PAD', 0], ['GO', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
with open('english-train', 'r') as fopen:
text_from = fopen.read().lower().split('\n')[:-1]
with open('vietnam-train', 'r') as fopen:
text_to = fopen.read().lower().split('\n')[:-1]
print('len from: %d, len to: %d'%(len(text_from), len(text_to)))
concat_from = ' '.join(text_from).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[4:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
concat_to = ' '.join(text_to).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab to size: %d'%(vocabulary_size_to))
print('Most common words', count_to[4:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
for i in range(len(text_to)):
text_to[i] += ' EOS'
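# Seq2seq model: a stacked bidirectional GRU encoder whose cells are wrapped with Luong attention
# over their inputs, feeding a stacked GRU decoder initialised from the final encoder state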
class Chatbot:
def __init__(self, size_layer, num_layers, embedded_size,
from_dict_size, to_dict_size, learning_rate, batch_size):
def cells(size,reuse=False):
return tf.nn.rnn_cell.GRUCell(size,reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.placeholder(tf.int32, [None])
self.Y_seq_len = tf.placeholder(tf.int32, [None])
batch_size = tf.shape(self.X)[0]
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
main = tf.strided_slice(self.X, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
decoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, decoder_input)
def attention():
attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units = size_layer//2,
memory = encoder_embedded)
return tf.contrib.seq2seq.AttentionWrapper(cell = cells(size_layer//2),
attention_mechanism = attention_mechanism,
attention_layer_size = size_layer//2)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw = attention(),
cell_bw = attention(),
inputs = encoder_embedded,
sequence_length = self.X_seq_len,
dtype = tf.float32,
scope = 'bidirectional_rnn_%d'%(n))
encoder_embedded = tf.concat((out_fw, out_bw), 2)
bi_state = tf.concat((state_fw[0],state_bw[0]), -1)
last_state = tuple([bi_state] * num_layers)
with tf.variable_scope("decoder"):
rnn_cells_dec = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)])
outputs, _ = tf.nn.dynamic_rnn(rnn_cells_dec, decoder_embedded,
initial_state = last_state,
dtype = tf.float32)
self.logits = tf.layers.dense(outputs,to_dict_size)
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.logits,
targets = self.Y,
weights = masks)
self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
y_t = tf.argmax(self.logits,axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, len(dictionary_from),
len(dictionary_to), learning_rate,batch_size)
sess.run(tf.global_variables_initializer())
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
ints.append(dic.get(k,UNK))
X.append(ints)
return X
X = str_idx(text_from, dictionary_from)
Y = str_idx(text_to, dictionary_to)
maxlen_question = max([len(x) for x in X]) * 2
maxlen_answer = max([len(y) for y in Y]) * 2
maxlen_question, maxlen_answer
def pad_sentence_batch(sentence_batch, pad_int, maxlen):
padded_seqs = []
seq_lens = []
max_sentence_len = maxlen
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(maxlen)
return padded_seqs, seq_lens
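# Training loop: shuffle each epoch, pad every mini-batch to a fixed length,
# and track the average loss and token-level accuracy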
for i in range(epoch):
total_loss, total_accuracy = 0, 0
X, Y = shuffle(X, Y)
for k in range(0, len(text_to), batch_size):
index = min(k + batch_size, len(text_to))
batch_x, seq_x = pad_sentence_batch(X[k: index], PAD, maxlen_answer)
batch_y, seq_y = pad_sentence_batch(Y[k: index], PAD, maxlen_answer)
predicted, accuracy, loss, _ = sess.run([tf.argmax(model.logits,2),
model.accuracy, model.cost, model.optimizer],
feed_dict={model.X:batch_x,
model.Y:batch_y,
model.X_seq_len:seq_x,
model.Y_seq_len:seq_y})
total_loss += loss
total_accuracy += accuracy
total_loss /= (len(text_to) / batch_size)
total_accuracy /= (len(text_to) / batch_size)
print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))
for i in range(len(batch_x)):
print('row %d'%(i+1))
print('QUESTION:',' '.join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0,1,2,3]]))
print('REAL ANSWER:',' '.join([rev_dictionary_to[n] for n in batch_y[i] if n not in[0,1,2,3]]))
print('PREDICTED ANSWER:',' '.join([rev_dictionary_to[n] for n in predicted[i] if n not in[0,1,2,3]]),'\n')
```
# Python Function Sample
## Requirements
- Authenticated to gcloud (```gcloud auth application-default login```)
This notebook demonstrates how to develop a Python-function-based model. This type of model is useful because the user can define their own logic inside the model, as long as it satisfies the contract given in `merlin.PyFuncModel`.
The model that we are going to develop is an ensemble of an xgboost model and an sklearn model.
```
!pip install --upgrade -r requirements.txt > /dev/null
import merlin
import warnings
import os
import xgboost as xgb
from merlin.model import ModelType, PyFuncModel
from sklearn import svm
from sklearn.datasets import load_iris
from joblib import dump
warnings.filterwarnings('ignore')
```
## 1. Initialize
### 1.1 Set Server
```
merlin.set_url("localhost:8080/api/merlin")
```
### 1.2 Set Active Project
`project` represents a project in real life. You may have multiple models within a project.
`merlin.set_project(<project_name>)` will set the active project to the project whose name matches the argument. You can only set it to an existing project. If you would like to create a new project, please do so from the MLP console at http://localhost:8080/projects/create.
```
merlin.set_project("sample")
```
### 1.3 Set Active Model
`model` represents an abstract ML model. Conceptually, `model` in MLP is similar to a class in a programming language. To instantiate a `model` you'll have to create a `model_version`.
Each `model` has a type; the model types currently supported by MLP are: sklearn, xgboost, tensorflow, pytorch, and user-defined model (i.e. pyfunc model).
`model_version` represents a snapshot of a particular `model` iteration. You'll be able to attach information such as metrics and tags to a given `model_version`, as well as deploy it as a model service.
`merlin.set_model(<model_name>, <model_type>)` will set the active model to the model with the given name; if no model with that name is found, a new model will be created.
```
merlin.set_model("pyfunc-sample-2", ModelType.PYFUNC)
```
## 2. Train Model
In this step we are going to train 2 IRIS classifiers and combine their prediction results in a single model, which will be implemented as a PyFunc-type model.
### 2.1 Train First Model
```
model_1_dir = "xgboost-model"
BST_FILE = "model_1.bst"
iris = load_iris()
y = iris['target']
X = iris['data']
dtrain = xgb.DMatrix(X, label=y)
param = {'max_depth': 6,
'eta': 0.1,
'silent': 1,
'nthread': 4,
'num_class': 3,
'objective': 'multi:softprob'
}
xgb_model = xgb.train(params=param, dtrain=dtrain)
model_1_path = os.path.join(model_1_dir, BST_FILE)
xgb_model.save_model(model_1_path)
```
### 2.2 Train Second Model
```
model_2_dir = "sklearn-model"
MODEL_FILE = "model_2.joblib"
model_2_path = os.path.join(model_2_dir, MODEL_FILE)
clf = svm.SVC(gamma='scale', probability=True)
clf.fit(X, y)
dump(clf, model_2_path)
```
### 2.3 Create PyFunc Model
To create a PyFunc model you'll have to extend the `merlin.PyFuncModel` class and implement its `initialize` and `infer` methods.
`initialize` will be called once during model initialization. The argument to `initialize` is a dictionary mapping artifact names to their URLs; the artifact keys are the same values as those passed to `log_pyfunc_model`.
The `infer` method is the prediction method that needs to be implemented. It accepts a dictionary argument representing the incoming request body, and should return a dictionary object corresponding to the response body of the prediction result.
In the following example we create a PyFunc model called `EnsembleModel`. In its `initialize` method we expect 2 artifacts called `xgb_model` and `sklearn_model`; these artifacts point to the serialized model file of each model. The `infer` method simply runs prediction with both models and returns the average value.
```
import xgboost as xgb
import joblib
import numpy as np
class EnsembleModel(PyFuncModel):
def initialize(self, artifacts):
self._model_1 = xgb.Booster(model_file=artifacts["xgb_model"])
self._model_2 = joblib.load(artifacts["sklearn_model"])
def infer(self, request, **kwargs):
model_input = request["instances"]
inputs = np.array(model_input)
dmatrix = xgb.DMatrix(inputs)
result_1 = self._model_1.predict(dmatrix)
result_2 = self._model_2.predict_proba(inputs)
return {"predictions": ((result_1 + result_2) / 2).tolist()}
```
Let's test it locally
```
m = EnsembleModel()
m.initialize({"xgb_model": model_1_path, "sklearn_model": model_2_path})
m.infer({"instances": [[1,2,3,4], [2,1,2,4]] })
```
## 3. Deploy Model
To deploy the model, we will have to create an iteration of the model (by creating a `model_version`), upload the serialized model to MLP, and then deploy it.
### 3.1 Create Model Version and Upload
`merlin.new_model_version()` is a convenient method to create a model version and start its development process. It is equivalent to the following code:
```
v = model.new_model_version()
v.start()
v.log_pyfunc_model(model_instance=EnsembleModel(),
conda_env="env.yaml",
artifacts={"xgb_model": model_1_path, "sklearn_model": model_2_path})
v.finish()
```
To upload a PyFunc model you have to provide the following arguments:
1. `model_instance` is the instance of the PyFunc model; the model has to extend `merlin.PyFuncModel`
2. `conda_env` is the path to a conda environment yaml file. The environment yaml file must contain all dependencies required by the PyFunc model.
3. (Optional) `artifacts` is a dictionary of additional artifacts that you want to include in the model
4. (Optional) `code_path` is a list of directories containing Python code that will be loaded during model initialization; this is required when `model_instance` depends on a local Python package (see the sketch after the example below)
```
with merlin.new_model_version() as v:
merlin.log_pyfunc_model(model_instance=EnsembleModel(),
conda_env="env.yaml",
artifacts={"xgb_model": model_1_path, "sklearn_model": model_2_path})
```
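As a hedged illustration of argument 4 above (this is an addition, not part of the original sample), uploading a model that imports helpers from a local package could look like the call below; the `code_path` directory name is a placeholder.
```
with merlin.new_model_version() as v:
    merlin.log_pyfunc_model(model_instance=EnsembleModel(),
                            conda_env="env.yaml",
                            code_path=["my_local_package"],  # hypothetical local package directory
                            artifacts={"xgb_model": model_1_path, "sklearn_model": model_2_path})
```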
### 3.2 Deploy Model
We can also pass environment variables to the model during deployment by providing a dictionary of environment variables:
```
env_vars = {"WORKERS": "1"}
```
Each deployed model version will have its own generated URL:
```
endpoint = merlin.deploy(v, env_vars=env_vars)
```
### 3.3 Send Test Request
```
%%bash -s "$endpoint.url"
curl -v -X POST $1 -d '{
"instances": [
[2.8, 1.0, 6.8, 0.4],
[3.1, 1.4, 4.5, 1.6]
]
}'
```
### 3.4 Delete Deployment
```
merlin.undeploy(v)
```
|
github_jupyter
|
!pip install --upgrade -r requirements.txt > /dev/null
import merlin
import warnings
import os
import xgboost as xgb
from merlin.model import ModelType, PyFuncModel
from sklearn import svm
from sklearn.datasets import load_iris
from joblib import dump
warnings.filterwarnings('ignore')
merlin.set_url("localhost:8080/api/merlin")
merlin.set_project("sample")
merlin.set_model("pyfunc-sample-2", ModelType.PYFUNC)
model_1_dir = "xgboost-model"
BST_FILE = "model_1.bst"
iris = load_iris()
y = iris['target']
X = iris['data']
dtrain = xgb.DMatrix(X, label=y)
param = {'max_depth': 6,
'eta': 0.1,
'silent': 1,
'nthread': 4,
'num_class': 3,
'objective': 'multi:softprob'
}
xgb_model = xgb.train(params=param, dtrain=dtrain)
model_1_path = os.path.join(model_1_dir, BST_FILE)
xgb_model.save_model(model_1_path)
model_2_dir = "sklearn-model"
MODEL_FILE = "model_2.joblib"
model_2_path = os.path.join(model_2_dir, MODEL_FILE)
clf = svm.SVC(gamma='scale', probability=True)
clf.fit(X, y)
dump(clf, model_2_path)
import xgboost as xgb
import joblib
import numpy as np
class EnsembleModel(PyFuncModel):
def initialize(self, artifacts):
self._model_1 = xgb.Booster(model_file=artifacts["xgb_model"])
self._model_2 = joblib.load(artifacts["sklearn_model"])
def infer(self, request, **kwargs):
model_input = request["instances"]
inputs = np.array(model_input)
dmatrix = xgb.DMatrix(inputs)
result_1 = self._model_1.predict(dmatrix)
result_2 = self._model_2.predict_proba(inputs)
return {"predictions": ((result_1 + result_2) / 2).tolist()}
m = EnsembleModel()
m.initialize({"xgb_model": model_1_path, "sklearn_model": model_2_path})
m.infer({"instances": [[1,2,3,4], [2,1,2,4]] })
v = model.new_model_version()
v.start()
v.log_pyfunc_model(model_instance=EnsembleModel(),
conda_env="env.yaml",
artifacts={"xgb_model": model_1_path, "sklearn_model": model_2_path})
v.finish()
with merlin.new_model_version() as v:
merlin.log_pyfunc_model(model_instance=EnsembleModel(),
conda_env="env.yaml",
artifacts={"xgb_model": model_1_path, "sklearn_model": model_2_path})
env_vars = {"WORKERS": "1"}
endpoint = merlin.deploy(v, env_vars=env_vars)
%%bash -s "$endpoint.url"
curl -v -X POST $1 -d '{
"instances": [
[2.8, 1.0, 6.8, 0.4],
[3.1, 1.4, 4.5, 1.6]
]
}'
merlin.undeploy(v)
| 0.458834 | 0.920861 |
<a href="https://colab.research.google.com/github/griscz/beam-college/blob/main/day2/Advanced_grouping_and_aggregations.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# *Advanced grouping and aggregations*
Let's start installing and importing Beam
```
%pip install -q apache-beam[interactive] --no-warn-conflicts
import apache_beam as beam
from apache_beam import pvalue
from apache_beam import Create, FlatMap, Map, ParDo, Filter, Flatten
from apache_beam import CombineGlobally, CombinePerKey
from apache_beam.transforms.combiners import Top, Mean, Count
from apache_beam import pvalue, window, WindowInto
import logging
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
import apache_beam.runners.interactive.interactive_beam as ib
```
Some of the basic combiner functions are already built-in:
- **`Count`** takes a `PCollection` and outputs the number of elements.
- **`Top`** outputs the *n* largest/smallest of a `PCollection` given a comparison.
- **`Mean`** outputs the arithmetic mean of a `PCollection`.
Combiners can aggregate using the whole `PCollection` or by key using methods:
- **`.Globally`** applies the combiner to the whole `PCollection`.
- **`.PerKey`** applies the combiner to the values of each key in the `PCollection`.
```
p = beam.Pipeline(InteractiveRunner())
elements = [
{"country": "China", "population": 1389, "continent": "Asia"},
{"country": "India", "population": 1311, "continent": "Asia"},
{"country": "Japan", "population": 126, "continent": "Asia"},
{"country": "USA", "population": 331, "continent": "America"},
{"country": "Ireland", "population": 5, "continent": "Europe"},
{"country": "Indonesia", "population": 273, "continent": "Asia"},
{"country": "Brazil", "population": 212, "continent": "America"},
{"country": "Egypt", "population": 102, "continent": "Africa"},
{"country": "Spain", "population": 47, "continent": "Europe"},
{"country": "Ghana", "population": 31, "continent": "Africa"},
{"country": "Australia", "population": 25, "continent": "Oceania"},
]
create = (p | "Create" >> Create(elements)
| "Map Keys" >> Map(lambda x: (x['continent'], x['population'])))
element_count_total = create | "Total Count" >> Count.Globally()
element_count_grouped = create | "Count Per Key" >> Count.PerKey()
top_grouped = create | "Top" >> Top.PerKey(n=2) # We get the top 2
mean_grouped = create | "Mean" >> Mean.PerKey()
ib.show_graph(p)
ib.show(element_count_total, element_count_grouped, top_grouped, mean_grouped)
```
We can also create our own **Combiners** and apply them both `Globally` and `PerKey`
```
p = beam.Pipeline(InteractiveRunner())
elements = ["Lorem ipsum dolor sit amet. Consectetur adipiscing elit",
"Sed eu velit nec sem vulputate loborti",
"In lobortis augue vitae sagittis molestie. Mauris volutpat tortor non purus elementum",
"Ut blandit massa et risus sollicitudin auctor"]
combine = (p | "Create" >> Create(elements)
| "Join" >> CombineGlobally(lambda x: ". ".join(x)))
ib.show(combine)
p = beam.Pipeline(InteractiveRunner())
elements = [
("Latin", "Lorem ipsum dolor sit amet. Consectetur adipiscing elit. Sed eu velit nec sem vulputate loborti"),
("Latin", "In lobortis augue vitae sagittis molestie. Mauris volutpat tortor non purus elementum"),
("English", "But as the riper should by time decease"),
("English", "That thereby beauty's rose might never die"),
("English", "From fairest creatures we desire increase"),
("Spanish", "tiempo que vivía un hidalgo de los de lanza en astillero, awindow_pcdarga antigua"),
("Spanish", "En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho"),
]
combine_key = (p | "Create" >> Create(elements)
| "Join By Language" >> CombinePerKey(lambda x: ". ".join(x)))
ib.show(combine_key)
```
**Combiners** also work on a window basis
```
p = beam.Pipeline(InteractiveRunner())
scores = [
{"player": "Marina", "score": 1000, "timestamp": 0},
{"player": "Cristina", "score": 2000, "timestamp": 10},
{"player": "Cristina", "score": 2000, "timestamp": 50},
{"player": "Marina", "score": 3000, "timestamp": 110},
{"player": "Juan", "score": 2000, "timestamp": 90},
{"player": "Cristina", "score": 2000, "timestamp": 80},
{"player": "Juan", "score": 1000, "timestamp": 100},
]
create = (p | "Create" >> Create(scores)
| "Add timestamps" >> Map(lambda x: window.TimestampedValue(x, x["timestamp"]))
| "To KV" >> Map(lambda x: (x["player"], x["score"]))
)
windowed = create | "FixedWindow" >> WindowInto(window.FixedWindows(60))
total_key = windowed | "Total Per Key" >> CombinePerKey(sum)
ib.show(total_key, include_window_info=True)
```
When using **windows** and **global combiners** we need to add `without_defaults`. This is because the default behaviour is to return a `PCollection` of one element for empty windows.
```
total = (windowed | Map(lambda x: x[1])
| "Total" >> CombineGlobally(sum).without_defaults())
ib.show(total, include_window_info=True)
```
---
Let's now try to create our own `Combiner`. We are going to build our own version of `Mean` (i.e., a `Combiner` that calculates the average).
```
p = beam.Pipeline(InteractiveRunner())
def average_fn(elements):
# print(elements)
list_elements = list(elements)
return sum(list_elements)/len(list_elements)
average = (p | "Create" >> Create(range(100))
| CombineGlobally(average_fn))
ib.show(average)
```
We can see that the output is wrong: the average of the first 100 non-negative integers is not 93.95. But why do we get that value?
```
sum(range(100)) / 100
```
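The reason is that `CombineGlobally` does not call `average_fn` once on the full dataset: the runner may split the input into bundles, apply the function to each bundle, and then apply it again to the partial results. With a plain average this yields an average of averages, which in general is not the true mean. A rough sketch of the effect (the bundle split below is made up for illustration; it is not what the runner actually used):
```
# Hypothetical bundle split to illustrate why averaging partial averages is wrong
elements = list(range(100))
bundles = [elements[:90], elements[90:]]        # two uneven bundles (assumed for the example)

partials = [sum(b) / len(b) for b in bundles]   # average_fn applied per bundle -> [44.5, 94.5]
final = sum(partials) / len(partials)           # average_fn applied to the partial results -> 69.5

print(partials, final)                          # 69.5 != 49.5, the true mean of range(100)
```
Because of this, a combining function has to behave correctly when applied to partial results, which a plain average does not; that is what the `CombineFn` interface addresses.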
We are going to need to use the combiner interface:
<details><summary>Solution</summary>
<p>
```
p = beam.Pipeline(InteractiveRunner())
class AverageFn(beam.CombineFn):
def create_accumulator(self):
sum = 0
count = 0
return sum, count
def add_input(self, accumulator, input):
return accumulator[0] + input, accumulator[1] + 1
def merge_accumulators(self, accumulators):
sums = [x[0] for x in accumulators]
counts = [x[1] for x in accumulators]
return (sum(sums), sum(counts))
def extract_output(self, final_accumulator):
if final_accumulator[1] != 0:
return final_accumulator[0] / final_accumulator[1]
else:
pass
average = (p | "Create" >> Create(range(100))
| CombineGlobally(AverageFn()))
ib.show(average)
```
</p>
### Streaming Example
We'll see this in Dataflow
```
p = beam.Pipeline(DataflowRunner(), options)
topic = "projects/pubsub-public-data/topics/taxirides-realtime"
def first_and_last(element):
key = element[0]
dictionaries = element[1]
output_row = {}
output_row["ride_id"] = key
if len(dictionaries) == 2:
for row in dictionaries:
if row["ride_status"] == "dropoff":
output_row["dropoff"] = row["timestamp"]
if row["ride_status"] == "pickup":
output_row["pickup"] = row["timestamp"]
logging.info(f"Final row {output_row}")
return output_row
else:
logging.warning(f"Length was {len(dictionaries)}")
pass
pubsub = (p | "Read Topic" >> ReadFromPubSub(topic=topic)
| "Json Loads" >> Map(json.loads)
| "Filter" >> Filter(lambda x: x["ride_status"] != "enroute")
| "Parse" >> Map(lambda x: (x["ride_id"], {"ride_status": x["ride_status"], "timestamp": x["timestamp"]})) # KV of ride id, dict
| "Session window" >> WindowInto(window.Sessions(3600),
trigger=trigger.Repeatedly(trigger.AfterCount(2)),
accumulation_mode=trigger.AccumulationMode.DISCARDING
)
| "Combine" >> CombinePerKey(ToListCombineFn())
| Map(first_and_last)
)
p.run()
```
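The cell above is meant to be run on Dataflow and relies on a few pieces that are not defined in this notebook: the `json`, `ReadFromPubSub`, `trigger`, `DataflowRunner` and `options` names need to be imported or created, and `ToListCombineFn` is a custom `CombineFn` that simply gathers the values of each key into a list. A minimal sketch of those missing pieces, assuming a standard Dataflow setup (the project, region and bucket below are placeholders), could look like this:
```
import json

from apache_beam.io.gcp.pubsub import ReadFromPubSub
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.runners import DataflowRunner
from apache_beam.transforms import trigger

# Placeholder Dataflow options; replace with your own project, region and bucket.
options = PipelineOptions(
    streaming=True,
    project="your-gcp-project",
    region="us-central1",
    temp_location="gs://your-bucket/tmp",
)

class ToListCombineFn(beam.CombineFn):
    """Collects every value of a key (per window/trigger firing) into a list."""
    def create_accumulator(self):
        return []

    def add_input(self, accumulator, input):
        accumulator.append(input)
        return accumulator

    def merge_accumulators(self, accumulators):
        merged = []
        for accumulator in accumulators:
            merged.extend(accumulator)
        return merged

    def extract_output(self, accumulator):
        return accumulator
```
With the pickup and dropoff events of each ride collected into a list this way, `first_and_last` can then merge them into a single output row.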
|
github_jupyter
|
%pip install -q apache-beam[interactive] --no-warn-conflicts
import apache_beam as beam
from apache_beam import pvalue
from apache_beam import Create, FlatMap, Map, ParDo, Filter, Flatten
from apache_beam import CombineGlobally, CombinePerKey
from apache_beam.transforms.combiners import Top, Mean, Count
from apache_beam import pvalue, window, WindowInto
import logging
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
import apache_beam.runners.interactive.interactive_beam as ib
p = beam.Pipeline(InteractiveRunner())
elements = [
{"country": "China", "population": 1389, "continent": "Asia"},
{"country": "India", "population": 1311, "continent": "Asia"},
{"country": "Japan", "population": 126, "continent": "Asia"},
{"country": "USA", "population": 331, "continent": "America"},
{"country": "Ireland", "population": 5, "continent": "Europe"},
{"country": "Indonesia", "population": 273, "continent": "Asia"},
{"country": "Brazil", "population": 212, "continent": "America"},
{"country": "Egypt", "population": 102, "continent": "Africa"},
{"country": "Spain", "population": 47, "continent": "Europe"},
{"country": "Ghana", "population": 31, "continent": "Africa"},
{"country": "Australia", "population": 25, "continent": "Oceania"},
]
create = (p | "Create" >> Create(elements)
| "Map Keys" >> Map(lambda x: (x['continent'], x['population'])))
element_count_total = create | "Total Count" >> Count.Globally()
element_count_grouped = create | "Count Per Key" >> Count.PerKey()
top_grouped = create | "Top" >> Top.PerKey(n=2) # We get the top 2
mean_grouped = create | "Mean" >> Mean.PerKey()
ib.show_graph(p)
ib.show(element_count_total, element_count_grouped, top_grouped, mean_grouped)
p = beam.Pipeline(InteractiveRunner())
elements = ["Lorem ipsum dolor sit amet. Consectetur adipiscing elit",
"Sed eu velit nec sem vulputate loborti",
"In lobortis augue vitae sagittis molestie. Mauris volutpat tortor non purus elementum",
"Ut blandit massa et risus sollicitudin auctor"]
combine = (p | "Create" >> Create(elements)
| "Join" >> CombineGlobally(lambda x: ". ".join(x)))
ib.show(combine)
p = beam.Pipeline(InteractiveRunner())
elements = [
("Latin", "Lorem ipsum dolor sit amet. Consectetur adipiscing elit. Sed eu velit nec sem vulputate loborti"),
("Latin", "In lobortis augue vitae sagittis molestie. Mauris volutpat tortor non purus elementum"),
("English", "But as the riper should by time decease"),
("English", "That thereby beauty's rose might never die"),
("English", "From fairest creatures we desire increase"),
("Spanish", "tiempo que vivía un hidalgo de los de lanza en astillero, awindow_pcdarga antigua"),
("Spanish", "En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho"),
]
combine_key = (p | "Create" >> Create(elements)
| "Join By Language" >> CombinePerKey(lambda x: ". ".join(x)))
ib.show(combine_key)
p = beam.Pipeline(InteractiveRunner())
scores = [
{"player": "Marina", "score": 1000, "timestamp": 0},
{"player": "Cristina", "score": 2000, "timestamp": 10},
{"player": "Cristina", "score": 2000, "timestamp": 50},
{"player": "Marina", "score": 3000, "timestamp": 110},
{"player": "Juan", "score": 2000, "timestamp": 90},
{"player": "Cristina", "score": 2000, "timestamp": 80},
{"player": "Juan", "score": 1000, "timestamp": 100},
]
create = (p | "Create" >> Create(scores)
| "Add timestamps" >> Map(lambda x: window.TimestampedValue(x, x["timestamp"]))
| "To KV" >> Map(lambda x: (x["player"], x["score"]))
)
windowed = create | "FixedWindow" >> WindowInto(window.FixedWindows(60))
total_key = windowed | "Total Per Key" >> CombinePerKey(sum)
ib.show(total_key, include_window_info=True)
total = (windowed | Map(lambda x: x[1])
| "Total" >> CombineGlobally(sum).without_defaults())
ib.show(total, include_window_info=True)
p = beam.Pipeline(InteractiveRunner())
def average_fn(elements):
# print(elements)
list_elements = list(elements)
return sum(list_elements)/len(list_elements)
average = (p | "Create" >> Create(range(100))
| CombineGlobally(average_fn))
ib.show(average)
sum(range(100)) / 100
p = beam.Pipeline(InteractiveRunner())
class AverageFn(beam.CombineFn):
def create_accumulator(self):
sum = 0
count = 0
return sum, count
def add_input(self, accumulator, input):
return accumulator[0] + input, accumulator[1] + 1
def merge_accumulators(self, accumulators):
sums = [x[0] for x in accumulators]
counts = [x[1] for x in accumulators]
return (sum(sums), sum(counts))
def extract_output(self, final_accumulator):
if final_accumulator[1] != 0:
return final_accumulator[0] / final_accumulator[1]
else:
pass
average = (p | "Create" >> Create(range(100))
| CombineGlobally(AverageFn()))
ib.show(average)
p = beam.Pipeline(DataflowRunner(), options)
topic = "projects/pubsub-public-data/topics/taxirides-realtime"
def first_and_last(element):
key = element[0]
dictionaries = element[1]
output_row = {}
output_row["ride_id"] = key
if len(dictionaries) == 2:
for row in dictionaries:
if row["ride_status"] == "dropoff":
output_row["dropoff"] = row["timestamp"]
if row["ride_status"] == "pickup":
output_row["pickup"] = row["timestamp"]
logging.info(f"Final row {output_row}")
return output_row
else:
logging.warning(f"Length was {len(dictionaries)}")
pass
pubsub = (p | "Read Topic" >> ReadFromPubSub(topic=topic)
| "Json Loads" >> Map(json.loads)
| "Filter" >> Filter(lambda x: x["ride_status"] != "enroute")
| "Parse" >> Map(lambda x: (x["ride_id"], {"ride_status": x["ride_status"], "timestamp": x["timestamp"]})) # KV of ride id, dict
| "Session window" >> WindowInto(window.Sessions(3600),
trigger=trigger.Repeatedly(trigger.AfterCount(2)),
accumulation_mode=trigger.AccumulationMode.DISCARDING
)
| "Combine" >> CombinePerKey(ToListCombineFn())
| Map(first_and_last)
)
p.run()
| 0.369656 | 0.982574 |
```
import pandas as pd
cd C:\Users\juanc\Documents\Universidad\EAFIT\Maestría en Ciencia de Datos y Analítica\Semestre 2\Aprendizaje Automático\Supervisado
pwd
# Se puede cargar completo, que tarda mucho
# df = pd.read_csv('WELFake_Dataset.csv/WELFake_Dataset.csv')
# O simplemente una fracción
df = pd.read_csv('WELFake_Dataset.csv/WELFake_Dataset.csv').sample(1000).reset_index(drop=True)
df.head()
df.info()
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
```
## Instances with very little text are removed; the cutoff is based on the text length relative to the average title length
```
df.head()
df.title.apply(len).mean()
# Identificar indices donde se da una longitud de texto menor a la longitud del título promedio
df.drop(df.loc[df.text.apply(len) <= 78].index, inplace = True)
df.info()
conteo_categorias=df['label'].value_counts()
tabla_categorias=pd.DataFrame()
tabla_categorias['conteo']=conteo_categorias
tabla_categorias['porcentaje']=(conteo_categorias/df.shape[0]*100)
tabla_categorias['acumulado']=tabla_categorias['porcentaje'].cumsum()
# (0 = fake and 1 = real).
tabla_categorias
```
# Preparation
```
import re
import nltk
import numpy as np
import matplotlib.pyplot as plt
#nltk.download('stopwords')
#nltk.download('words')
stopwords_nltk = set(nltk.corpus.stopwords.words('english'))
df['text'][30]
%time datos = df.rename(columns={'text':'tokens'})
datos = datos.drop('Unnamed: 0',axis=1)
```
# Tokenization
```
# %time datos['tokens'].sample(100)=datos['tokens'].apply(nltk.word_tokenize)
%time datos['tokens']=datos['tokens'].apply(nltk.word_tokenize)
datos.head()
def mostrar_frecuencias(datos):
tokens_concatenate=np.concatenate(datos['tokens'])
fdist = nltk.FreqDist(tokens_concatenate)
topwords = fdist.most_common(20)
x,y = zip(*topwords)
print('Numero de tokens:',len(fdist))
for i,token in enumerate(topwords[0:20]):
print(i+1,token)
plt.figure(figsize=(8,6))
plt.title("Frecuencias de Palabras para Noticias " + str(var))
plt.bar(x,y)
plt.xticks(rotation=90)
plt.show()
```
The uncleaned tokens are shown to highlight the importance of this cleaning step; later on, the tokens for fake and real news will be shown
```
# Se muestra frecuencia de noticias reales
var = "Reales"
%time mostrar_frecuencias(datos.loc[datos['label'] == 1].reset_index(drop=True))
# Se muestra frecuencia de noticias falsas
var = "Falsas"
%time mostrar_frecuencias(datos.loc[datos['label'] == 0].reset_index(drop=True))
```
# Token Cleaning
```
import gensim
from gensim.parsing.preprocessing import remove_stopwords, STOPWORDS
stop_words = nltk.corpus.stopwords.words('english')
def limpiar_tokens(tokens):
# Esta función quita todos los caracteres que no sean alfabeticos
tokens=[re.sub(r'[0-9\. ]+','',token) for token in tokens] # quitar números
tokens=[re.sub(r'[^A-Za-z]+','',token) for token in tokens] # quitar otros caracteres
#tokens=[token for token in tokens if token not in stopwords_nltk]
tokens=[token for token in tokens if token not in stop_words]
tokens=[token.lower() for token in tokens if len(token)>2]
return tokens
%time datos['tokens']=datos['tokens'].apply(limpiar_tokens)
```
## Stemming
```
stemmer=nltk.stem.SnowballStemmer("english")
%time datos['tokens']=datos['tokens'].apply(lambda tokens: [stemmer.stem(w) for w in tokens])
```
# Additional Cleaning
```
# Extensión de stopwords
stop_words.extend(['the','said','trump'])
%time datos['tokens']=datos['tokens'].apply(limpiar_tokens)
```
The following shows how stemming and the removal of non-alphabetic characters change the token frequencies
```
var = "Reales"
%time mostrar_frecuencias(datos.loc[datos['label'] == 1].reset_index(drop=True))
var = "Falsas"
%time mostrar_frecuencias(datos.loc[datos['label'] == 0].reset_index(drop=True))
```
The improvement in the quality of the words for the BoW is noticeable
# Algorithm Implementation
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ast import literal_eval
import ast
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.metrics import confusion_matrix,accuracy_score,classification_report, recall_score, f1_score, precision_recall_fscore_support
datos = datos.reset_index(drop=True)
datos.head()
datos.info()
def literal_return(val):
try:
return ast.literal_eval(val)
except ValueError:
return (val)
datos.tokens=datos['tokens'].apply(literal_return)
```
# TF-IDF Matrix
```
%time fdist = nltk.FreqDist(np.concatenate(datos['tokens'].reset_index(drop=True))) #].sampe(10000)
%time tokens=fdist.most_common(len(fdist))
%time tokens_tf=pd.DataFrame(tokens,columns=['token','TF'])
fdist
tokens_tf.shape
tokens_tf.query("TF>1",inplace=True)
tokens_tf.shape
tokens_tf.head()
query="TF<=100000 and TF>=1"
tokens_tf.query(query)
bow = tokens_tf.query(query).token.values
bow.shape
bow
%time tfidf=pd.DataFrame(TfidfVectorizer(vocabulary=bow).fit_transform(datos['tokens'].str.join(" ")).toarray(), columns=bow)
tfidf
X_train, X_test, y_train, y_test = train_test_split(
tfidf.values, datos.label.values, test_size=0.2, random_state=42)
cms_train=[]
cms_test=[]
accuracy_train = []
accuracy_test = []
f1_train = []
f1_test = []
recall_train = []
recall_test = []
reporte_train = []
reporte_test = []
max_depths = np.arange(2,22,2) #(10,110, 10)
for max_depth in max_depths:
print("max_depth:",max_depth)
tree = DecisionTreeClassifier(max_depth=max_depth,class_weight='balanced')
tree.fit(X_train, y_train)
predicciones_train=tree.predict(X_train)
predicciones_test=tree.predict(X_test)
cms_train.append(confusion_matrix(y_train,predicciones_train))
cms_test.append(confusion_matrix(y_test,predicciones_test))
accuracy_train.append(accuracy_score(y_train,predicciones_train))
accuracy_test.append(accuracy_score(y_test,predicciones_test))
f1_train.append(f1_score(y_train,predicciones_train,average='weighted'))
f1_test.append(f1_score(y_test,predicciones_test,average='weighted'))
recall_train.append(recall_score(y_train,predicciones_train,average='weighted'))
recall_test.append(recall_score(y_test,predicciones_test,average='weighted'))
reporte_train.append(precision_recall_fscore_support(y_train,predicciones_train))
reporte_test.append(precision_recall_fscore_support(y_test,predicciones_test))
print("Train:")
print(cms_train[-1])
print(classification_report(y_train,predicciones_train))
print("Test:")
print(cms_test[-1])
print(classification_report(y_test,predicciones_test))
print("-----------")
fig,ax=plt.subplots(2,1,figsize=(10,6),tight_layout=True)
fig.suptitle(" DecisionTree")
ax[0].scatter(max_depths,accuracy_train,s=50,alpha=0.8, label = 'Accuracy')
ax[0].scatter(max_depths,f1_train,s=50,alpha=0.8, label = 'F1-score')
ax[0].scatter(max_depths,recall_train,s=50,alpha=0.8, label = 'Recall')
ax[0].legend()
ax[0].grid()
ax[0].set_title('\n\nTraining')
# ax[0].set_xlabel('max_depth')
ax[1].scatter(max_depths,accuracy_test,s=50,alpha=0.8, label = 'Accuracy')
ax[1].scatter(max_depths,f1_test,s=50,alpha=0.8, label = 'F1-score')
ax[1].scatter(max_depths,recall_test,s=50,alpha=0.8, label = 'Recall')
ax[1].legend()
ax[1].grid()
ax[1].set_title('Test')
ax[1].set_xlabel('max_depth')
plt.show()
```
# Cross-Validation for the Out-of-Sample Model
```
from sklearn.model_selection import cross_validate
#datos = pd.read_csv('data_equilibrada.csv')
#datos.tokens=datos.tokens.apply(literal_eval)
#bow = pd.read_csv('bow.csv')
#tfidf=pd.DataFrame(TfidfVectorizer(vocabulary=bow.token.values).fit_transform(datos['tokens'].str.join(" ")).toarray(), columns=bow.token.values)
X=tfidf.values
y=datos.label.values
dt = DecisionTreeClassifier(max_depth=8,class_weight='balanced')
cv_dt=cross_validate(dt, X, y, cv=20,scoring=('accuracy','f1_weighted','recall_weighted'),n_jobs=-1)
pd.DataFrame(cv_dt).describe()
plt.hist(cv_dt['test_accuracy'],bins=6)
plt.title("DecissionTree")
plt.xlabel("test_accuracy ")
plt.show()
```
# Hyperparameter Tuning
```
# Import moduels for Hyperparameter Tuning
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
# Setup the parameters and distributions to sample from: param_dist
param_dist = {"max_depth": [3, 30], "min_samples_leaf": randint(1,20), "criterion": ["gini", "entropy"], "splitter": ["best", "random"]}
# Instantiate a Decision Tree classifier: tree
tree = DecisionTreeClassifier()
# Instantiate the RandomizedSearchCV objetc: tree_cv, cv is k-folds.
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
# Fit it to the data
tree_cv.fit(X_train, y_train)
# Print the tunred parameters and score
print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
print("Best score is {}".format(tree_cv.best_score_))
predicciones_test2=tree_cv.predict(X_test)
accuracy_score(y_test,predicciones_test2)
confusion_matrix(y_test,predicciones_test2)
accuracy_score(y_test,predicciones_test2)
f1_score(y_test,predicciones_test2,average='weighted')
recall_score(y_test,predicciones_test2,average='weighted')
precision_recall_fscore_support(y_test,predicciones_test2)
print("Test:")
print(cms_test[-1])
print(classification_report(y_test,predicciones_test))
print("-----------")
```
|
github_jupyter
|
import pandas as pd
cd C:\Users\juanc\Documents\Universidad\EAFIT\Maestría en Ciencia de Datos y Analítica\Semestre 2\Aprendizaje Automático\Supervisado
pwd
# Se puede cargar completo, que tarda mucho
# df = pd.read_csv('WELFake_Dataset.csv/WELFake_Dataset.csv')
# O simplemente una fracción
df = pd.read_csv('WELFake_Dataset.csv/WELFake_Dataset.csv').sample(1000).reset_index(drop=True)
df.head()
df.info()
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
df.head()
df.title.apply(len).mean()
# Identificar indices donde se da una longitud de texto menor a la longitud del título promedio
df.drop(df.loc[df.text.apply(len) <= 78].index, inplace = True)
df.info()
conteo_categorias=df['label'].value_counts()
tabla_categorias=pd.DataFrame()
tabla_categorias['conteo']=conteo_categorias
tabla_categorias['porcentaje']=(conteo_categorias/df.shape[0]*100)
tabla_categorias['acumulado']=tabla_categorias['porcentaje'].cumsum()
# (0 = fake and 1 = real).
tabla_categorias
import re
import nltk
import numpy as np
import matplotlib.pyplot as plt
#nltk.download('stopwords')
#nltk.download('words')
stopwords_nltk = set(nltk.corpus.stopwords.words('english'))
df['text'][30]
%time datos = df.rename(columns={'text':'tokens'})
datos = datos.drop('Unnamed: 0',axis=1)
# %time datos['tokens'].sample(100)=datos['tokens'].apply(nltk.word_tokenize)
%time datos['tokens']=datos['tokens'].apply(nltk.word_tokenize)
datos.head()
def mostrar_frecuencias(datos):
tokens_concatenate=np.concatenate(datos['tokens'])
fdist = nltk.FreqDist(tokens_concatenate)
topwords = fdist.most_common(20)
x,y = zip(*topwords)
print('Numero de tokens:',len(fdist))
for i,token in enumerate(topwords[0:20]):
print(i+1,token)
plt.figure(figsize=(8,6))
plt.title("Frecuencias de Palabras para Noticias " + str(var))
plt.bar(x,y)
plt.xticks(rotation=90)
plt.show()
# Se muestra frecuencia de noticias reales
var = "Reales"
%time mostrar_frecuencias(datos.loc[datos['label'] == 1].reset_index(drop=True))
# Se muestra frecuencia de noticias falsas
var = "Falsas"
%time mostrar_frecuencias(datos.loc[datos['label'] == 0].reset_index(drop=True))
import gensim
from gensim.parsing.preprocessing import remove_stopwords, STOPWORDS
stop_words = nltk.corpus.stopwords.words('english')
def limpiar_tokens(tokens):
# Esta función quita todos los caracteres que no sean alfabeticos
tokens=[re.sub(r'[0-9\. ]+','',token) for token in tokens] # quitar números
tokens=[re.sub(r'[^A-Za-z]+','',token) for token in tokens] # quitar otros caracteres
#tokens=[token for token in tokens if token not in stopwords_nltk]
tokens=[token for token in tokens if token not in stop_words]
tokens=[token.lower() for token in tokens if len(token)>2]
return tokens
%time datos['tokens']=datos['tokens'].apply(limpiar_tokens)
stemmer=nltk.stem.SnowballStemmer("english")
%time datos['tokens']=datos['tokens'].apply(lambda tokens: [stemmer.stem(w) for w in tokens])
# Extensión de stopwords
stop_words.extend(['the','said','trump'])
%time datos['tokens']=datos['tokens'].apply(limpiar_tokens)
var = "Reales"
%time mostrar_frecuencias(datos.loc[datos['label'] == 1].reset_index(drop=True))
var = "Falsas"
%time mostrar_frecuencias(datos.loc[datos['label'] == 0].reset_index(drop=True))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ast import literal_eval
import ast
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.metrics import confusion_matrix,accuracy_score,classification_report, recall_score, f1_score, precision_recall_fscore_support
datos = datos.reset_index(drop=True)
datos.head()
datos.info()
def literal_return(val):
try:
return ast.literal_eval(val)
except ValueError:
return (val)
datos.tokens=datos['tokens'].apply(literal_return)
%time fdist = nltk.FreqDist(np.concatenate(datos['tokens'].reset_index(drop=True))) #].sampe(10000)
%time tokens=fdist.most_common(len(fdist))
%time tokens_tf=pd.DataFrame(tokens,columns=['token','TF'])
fdist
tokens_tf.shape
tokens_tf.query("TF>1",inplace=True)
tokens_tf.shape
tokens_tf.head()
query="TF<=100000 and TF>=1"
tokens_tf.query(query)
bow = tokens_tf.query(query).token.values
bow.shape
bow
%time tfidf=pd.DataFrame(TfidfVectorizer(vocabulary=bow).fit_transform(datos['tokens'].str.join(" ")).toarray(), columns=bow)
tfidf
X_train, X_test, y_train, y_test = train_test_split(
tfidf.values, datos.label.values, test_size=0.2, random_state=42)
cms_train=[]
cms_test=[]
accuracy_train = []
accuracy_test = []
f1_train = []
f1_test = []
recall_train = []
recall_test = []
reporte_train = []
reporte_test = []
max_depths = np.arange(2,22,2) #(10,110, 10)
for max_depth in max_depths:
print("max_depth:",max_depth)
tree = DecisionTreeClassifier(max_depth=max_depth,class_weight='balanced')
tree.fit(X_train, y_train)
predicciones_train=tree.predict(X_train)
predicciones_test=tree.predict(X_test)
cms_train.append(confusion_matrix(y_train,predicciones_train))
cms_test.append(confusion_matrix(y_test,predicciones_test))
accuracy_train.append(accuracy_score(y_train,predicciones_train))
accuracy_test.append(accuracy_score(y_test,predicciones_test))
f1_train.append(f1_score(y_train,predicciones_train,average='weighted'))
f1_test.append(f1_score(y_test,predicciones_test,average='weighted'))
recall_train.append(recall_score(y_train,predicciones_train,average='weighted'))
recall_test.append(recall_score(y_test,predicciones_test,average='weighted'))
reporte_train.append(precision_recall_fscore_support(y_train,predicciones_train))
reporte_test.append(precision_recall_fscore_support(y_test,predicciones_test))
print("Train:")
print(cms_train[-1])
print(classification_report(y_train,predicciones_train))
print("Test:")
print(cms_test[-1])
print(classification_report(y_test,predicciones_test))
print("-----------")
fig,ax=plt.subplots(2,1,figsize=(10,6),tight_layout=True)
fig.suptitle(" DecisionTree")
ax[0].scatter(max_depths,accuracy_train,s=50,alpha=0.8, label = 'Accuracy')
ax[0].scatter(max_depths,f1_train,s=50,alpha=0.8, label = 'F1-score')
ax[0].scatter(max_depths,recall_train,s=50,alpha=0.8, label = 'Recall')
ax[0].legend()
ax[0].grid()
ax[0].set_title('\n\nTraining')
# ax[0].set_xlabel('max_depth')
ax[1].scatter(max_depths,accuracy_test,s=50,alpha=0.8, label = 'Accuracy')
ax[1].scatter(max_depths,f1_test,s=50,alpha=0.8, label = 'F1-score')
ax[1].scatter(max_depths,recall_test,s=50,alpha=0.8, label = 'Recall')
ax[1].legend()
ax[1].grid()
ax[1].set_title('Test')
ax[1].set_xlabel('max_depth')
plt.show()
from sklearn.model_selection import cross_validate
#datos = pd.read_csv('data_equilibrada.csv')
#datos.tokens=datos.tokens.apply(literal_eval)
#bow = pd.read_csv('bow.csv')
#tfidf=pd.DataFrame(TfidfVectorizer(vocabulary=bow.token.values).fit_transform(datos['tokens'].str.join(" ")).toarray(), columns=bow.token.values)
X=tfidf.values
y=datos.label.values
dt = DecisionTreeClassifier(max_depth=8,class_weight='balanced')
cv_dt=cross_validate(dt, X, y, cv=20,scoring=('accuracy','f1_weighted','recall_weighted'),n_jobs=-1)
pd.DataFrame(cv_dt).describe()
plt.hist(cv_dt['test_accuracy'],bins=6)
plt.title("DecissionTree")
plt.xlabel("test_accuracy ")
plt.show()
# Import moduels for Hyperparameter Tuning
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
# Setup the parameters and distributions to sample from: param_dist
param_dist = {"max_depth": [3, 30], "min_samples_leaf": randint(1,20), "criterion": ["gini", "entropy"], "splitter": ["best", "random"]}
# Instantiate a Decision Tree classifier: tree
tree = DecisionTreeClassifier()
# Instantiate the RandomizedSearchCV objetc: tree_cv, cv is k-folds.
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
# Fit it to the data
tree_cv.fit(X_train, y_train)
# Print the tunred parameters and score
print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
print("Best score is {}".format(tree_cv.best_score_))
predicciones_test2=tree_cv.predict(X_test)
accuracy_score(y_test,predicciones_test2)
confusion_matrix(y_test,predicciones_test2)
accuracy_score(y_test,predicciones_test2)
f1_score(y_test,predicciones_test2,average='weighted')
recall_score(y_test,predicciones_test2,average='weighted')
precision_recall_fscore_support(y_test,predicciones_test2)
print("Test:")
print(cms_test[-1])
print(classification_report(y_test,predicciones_test))
print("-----------")
| 0.348645 | 0.777279 |
# Convolutional Layer
In this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer.
In this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights.
<img src='notebook_ims/conv_layer.gif' height=60% width=60% />
### Import the image
```
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
```
### Define and visualize the filters
```
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
```
## Define a convolutional layer
The various layers that make up any neural network are documented [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a:
* Convolutional layer
Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!
#### `__init__` and `forward`
To define a neural network in PyTorch, you define the layers of a model in the `__init__` function and define the forward behavior of the network, which applies those initialized layers to an input (`x`), in the `forward` function. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a NumPy array.
Below, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
```
### Visualize the output of each filter
First, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
```
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
```
Let's look at the output of a convolutional layer, before and after a ReLU activation function is applied.
```
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
```
#### ReLU activation
In this model, we've used an activation function that transforms the output of the convolutional layer. We've chosen a ReLU function to do this; it simply turns all negative pixel values into 0 (black). See the equation pictured below for input pixel values, `x`.
<img src='notebook_ims/relu_ex.png' height=50% width=50% />
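As a quick sanity check (this small cell is an addition, not part of the original notebook), you can apply the same ReLU to a handful of values and confirm that negatives are clipped to zero while positive values pass through unchanged:
```
# relu(x) = max(0, x), applied element-wise
sample = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(F.relu(sample))  # negatives become 0.0, positives are unchanged
```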
```
# after a ReLu is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
```
|
github_jupyter
|
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
# after a ReLu is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
| 0.626238 | 0.987092 |
```
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams
rc('axes', linewidth=5)
from remat.core.dfgraph import gen_linear_graph
from experiments.common.load_keras_model import get_keras_model, CHAIN_GRAPH_MODELS, SEGMENTATION_MODEL_NAMES
from remat.core.solvers.strategy_checkpoint_all import solve_checkpoint_all
from remat.tensorflow2.extraction import dfgraph_from_keras
from remat.core.solvers.strategy_chen import solve_chen_sqrtn, solve_chen_greedy
from remat.core.solvers.strategy_optimal_ilp import solve_ilp_gurobi
from remat.core.solvers.strategy_griewank import solve_griewank
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import LayerNormalization, Dense, Activation, Lambda, Reshape
sns.set('talk')
sns.set_style('white')
RED = "#e74c3c"
BLUE = "#3498db"
flatui = [RED, BLUE, "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
sns.set_palette(flatui)
def plot_matrix(R, ax, title=None):
ax.invert_yaxis()
ax.pcolormesh(R, cmap="Greys", vmin=0, vmax=1)
ax.set_title(title)
def solve_models(g):
checkpoint_all = solve_checkpoint_all(g)
chen_sqrtn = solve_chen_sqrtn(g, True)
griewank = solve_griewank(g, 5)
optimal = solve_ilp_gurobi(g, griewank.schedule_aux_data.activation_ram, approx=False,
solve_r=False, seed_s=chen_sqrtn.schedule_aux_data.S)
return checkpoint_all, chen_sqrtn, griewank, optimal
def plot_model(sols):
checkpoint_all, chen_sqrtn, griewank, optimal = sols
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
plot_matrix(checkpoint_all.schedule_aux_data.S, axs[0, 0], title="Checkpoint all nodes")
plot_matrix(chen_sqrtn.schedule_aux_data.S, axs[0, 1], title="Chen et al. ($\sqrt{n}$)")
plot_matrix(griewank.schedule_aux_data.S, axs[1, 0], title="$\texttt{revolve}$ ($\log{n}$)")
plot_matrix(optimal.schedule_aux_data.S, axs[1, 1], title="Checkmate (optimal)")
plt.plot(fig=fig)
return fig
model = get_keras_model("VGG19")
# g = gen_linear_graph(16)
g = dfgraph_from_keras(model)
print(g.vfwd)
checkpoint_all, chen_sqrtn, griewank, optimal = solve_models(g)
fig = plot_model((checkpoint_all, chen_sqrtn, griewank, optimal))
fig.savefig('out.pdf', bbox_inches='tight')
for method in [checkpoint_all, chen_sqrtn, griewank, optimal]:
aux_data = method.schedule_aux_data
print(aux_data.cpu / 1e9, aux_data.activation_ram / 1e6)
def plot_matrix(R, ax, title=None):
ax.invert_yaxis()
ax.pcolormesh(R, cmap="Greys", vmin=0, vmax=1)
ax.set_title(title)
def solve_models(g):
checkpoint_all = solve_checkpoint_all(g)
print("Checkpoint all activation ram", checkpoint_all.schedule_aux_data.activation_ram)
chen_sqrtn = solve_chen_sqrtn(g, True)
print("Chen activation ram", chen_sqrtn.schedule_aux_data.activation_ram)
griewank = solve_griewank(g, 5)
print("Griewank activation ram", griewank.schedule_aux_data.activation_ram)
optimal = solve_ilp_gurobi(g, griewank.schedule_aux_data.activation_ram, approx=False,
solve_r=False, seed_s=chen_sqrtn.schedule_aux_data.S)
print("optimal activation ram", optimal.schedule_aux_data.activation_ram)
return checkpoint_all, chen_sqrtn, griewank, optimal
def plot_model(sols):
checkpoint_all, chen_sqrtn, griewank, optimal = sols
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
plot_matrix(checkpoint_all.schedule_aux_data.R, axs[0, 0], title="Checkpoint all nodes")
plot_matrix(chen_sqrtn.schedule_aux_data.R, axs[0, 1], title="Chen et al. ($\sqrt{n}$)")
plot_matrix(griewank.schedule_aux_data.R, axs[1, 0], title="$\texttt{revolve}$ ($\log{n}$)")
plot_matrix(optimal.schedule_aux_data.R, axs[1, 1], title="Checkmate (optimal)")
plt.plot(fig=fig)
return fig
model = get_keras_model("VGG19")
# g = gen_linear_graph(16)
g = dfgraph_from_keras(model)
print(g.vfwd)
checkpoint_all, chen_sqrtn, griewank, optimal = solve_models(g)
fig = plot_model((checkpoint_all, chen_sqrtn, griewank, optimal))
fig.savefig('out.pdf', bbox_inches='tight')
for method in [checkpoint_all, chen_sqrtn, griewank, optimal]:
aux_data = method.schedule_aux_data
print(aux_data.cpu / 1e9, aux_data.activation_ram / 1e6)
def plot_matrix(R, ax, title=None):
ax.invert_yaxis()
ax.pcolormesh(R, cmap="Greys", vmin=0, vmax=1)
ax.set_title(title)
def solve_models(g):
checkpoint_all = solve_checkpoint_all(g)
print("Checkpoint all activation ram", checkpoint_all.schedule_aux_data.activation_ram)
chen_sqrtn = solve_chen_sqrtn(g, True)
print("Chen activation ram", chen_sqrtn.schedule_aux_data.activation_ram)
griewank = solve_griewank(g, 5)
print("Griewank activation ram", griewank.schedule_aux_data.activation_ram)
optimal = solve_ilp_gurobi(g, griewank.schedule_aux_data.activation_ram, approx=False,
solve_r=True, seed_s=chen_sqrtn.schedule_aux_data.S)
print("optimal activation ram", optimal.schedule_aux_data.activation_ram)
return checkpoint_all, chen_sqrtn, griewank, optimal
def plot_model(sols):
checkpoint_all, chen_sqrtn, griewank, optimal = sols
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
plot_matrix(checkpoint_all.schedule_aux_data.R, axs[0, 0], title="Checkpoint all nodes")
plot_matrix(chen_sqrtn.schedule_aux_data.R, axs[0, 1], title="Chen et al. ($\sqrt{n}$)")
plot_matrix(griewank.schedule_aux_data.R, axs[1, 0], title="$\texttt{revolve}$ ($\log{n}$)")
plot_matrix(optimal.schedule_aux_data.R, axs[1, 1], title="Checkmate (optimal)")
plt.plot(fig=fig)
return fig
model = get_keras_model("VGG19")
# g = gen_linear_graph(16)
g = dfgraph_from_keras(model)
print(g.vfwd)
checkpoint_all, chen_sqrtn, griewank, optimal = solve_models(g)
fig = plot_model((checkpoint_all, chen_sqrtn, griewank, optimal))
fig.savefig('out.pdf', bbox_inches='tight')
for method in [checkpoint_all, chen_sqrtn, griewank, optimal]:
aux_data = method.schedule_aux_data
print(aux_data.cpu / 1e9, aux_data.activation_ram / 1e6)
!git lg
g
```
|
github_jupyter
|
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams
rc('axes', linewidth=5)
from remat.core.dfgraph import gen_linear_graph
from experiments.common.load_keras_model import get_keras_model, CHAIN_GRAPH_MODELS, SEGMENTATION_MODEL_NAMES
from remat.core.solvers.strategy_checkpoint_all import solve_checkpoint_all
from remat.tensorflow2.extraction import dfgraph_from_keras
from remat.core.solvers.strategy_chen import solve_chen_sqrtn, solve_chen_greedy
from remat.core.solvers.strategy_optimal_ilp import solve_ilp_gurobi
from remat.core.solvers.strategy_griewank import solve_griewank
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import LayerNormalization, Dense, Activation, Lambda, Reshape
sns.set('talk')
sns.set_style('white')
RED = "#e74c3c"
BLUE = "#3498db"
flatui = [RED, BLUE, "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
sns.set_palette(flatui)
def plot_matrix(R, ax, title=None):
ax.invert_yaxis()
ax.pcolormesh(R, cmap="Greys", vmin=0, vmax=1)
ax.set_title(title)
def solve_models(g):
checkpoint_all = solve_checkpoint_all(g)
chen_sqrtn = solve_chen_sqrtn(g, True)
griewank = solve_griewank(g, 5)
optimal = solve_ilp_gurobi(g, griewank.schedule_aux_data.activation_ram, approx=False,
solve_r=False, seed_s=chen_sqrtn.schedule_aux_data.S)
return checkpoint_all, chen_sqrtn, griewank, optimal
def plot_model(sols):
checkpoint_all, chen_sqrtn, griewank, optimal = sols
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
plot_matrix(checkpoint_all.schedule_aux_data.S, axs[0, 0], title="Checkpoint all nodes")
plot_matrix(chen_sqrtn.schedule_aux_data.S, axs[0, 1], title="Chen et al. ($\sqrt{n}$)")
plot_matrix(griewank.schedule_aux_data.S, axs[1, 0], title="$\texttt{revolve}$ ($\log{n}$)")
plot_matrix(optimal.schedule_aux_data.S, axs[1, 1], title="Checkmate (optimal)")
plt.plot(fig=fig)
return fig
model = get_keras_model("VGG19")
# g = gen_linear_graph(16)
g = dfgraph_from_keras(model)
print(g.vfwd)
checkpoint_all, chen_sqrtn, griewank, optimal = solve_models(g)
fig = plot_model((checkpoint_all, chen_sqrtn, griewank, optimal))
fig.savefig('out.pdf', bbox_inches='tight')
for method in [checkpoint_all, chen_sqrtn, griewank, optimal]:
aux_data = method.schedule_aux_data
print(aux_data.cpu / 1e9, aux_data.activation_ram / 1e6)
def plot_matrix(R, ax, title=None):
ax.invert_yaxis()
ax.pcolormesh(R, cmap="Greys", vmin=0, vmax=1)
ax.set_title(title)
def solve_models(g):
checkpoint_all = solve_checkpoint_all(g)
print("Checkpoint all activation ram", checkpoint_all.schedule_aux_data.activation_ram)
chen_sqrtn = solve_chen_sqrtn(g, True)
print("Chen activation ram", chen_sqrtn.schedule_aux_data.activation_ram)
griewank = solve_griewank(g, 5)
print("Griewank activation ram", griewank.schedule_aux_data.activation_ram)
optimal = solve_ilp_gurobi(g, griewank.schedule_aux_data.activation_ram, approx=False,
solve_r=False, seed_s=chen_sqrtn.schedule_aux_data.S)
print("optimal activation ram", optimal.schedule_aux_data.activation_ram)
return checkpoint_all, chen_sqrtn, griewank, optimal
def plot_model(sols):
checkpoint_all, chen_sqrtn, griewank, optimal = sols
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
plot_matrix(checkpoint_all.schedule_aux_data.R, axs[0, 0], title="Checkpoint all nodes")
plot_matrix(chen_sqrtn.schedule_aux_data.R, axs[0, 1], title="Chen et al. ($\sqrt{n}$)")
plot_matrix(griewank.schedule_aux_data.R, axs[1, 0], title="$\texttt{revolve}$ ($\log{n}$)")
plot_matrix(optimal.schedule_aux_data.R, axs[1, 1], title="Checkmate (optimal)")
plt.plot(fig=fig)
return fig
model = get_keras_model("VGG19")
# g = gen_linear_graph(16)
g = dfgraph_from_keras(model)
print(g.vfwd)
checkpoint_all, chen_sqrtn, griewank, optimal = solve_models(g)
fig = plot_model((checkpoint_all, chen_sqrtn, griewank, optimal))
fig.savefig('out.pdf', bbox_inches='tight')
for method in [checkpoint_all, chen_sqrtn, griewank, optimal]:
aux_data = method.schedule_aux_data
print(aux_data.cpu / 1e9, aux_data.activation_ram / 1e6)
def plot_matrix(R, ax, title=None):
ax.invert_yaxis()
ax.pcolormesh(R, cmap="Greys", vmin=0, vmax=1)
ax.set_title(title)
def solve_models(g):
checkpoint_all = solve_checkpoint_all(g)
print("Checkpoint all activation ram", checkpoint_all.schedule_aux_data.activation_ram)
chen_sqrtn = solve_chen_sqrtn(g, True)
print("Chen activation ram", chen_sqrtn.schedule_aux_data.activation_ram)
griewank = solve_griewank(g, 5)
print("Griewank activation ram", griewank.schedule_aux_data.activation_ram)
optimal = solve_ilp_gurobi(g, griewank.schedule_aux_data.activation_ram, approx=False,
solve_r=True, seed_s=chen_sqrtn.schedule_aux_data.S)
print("optimal activation ram", optimal.schedule_aux_data.activation_ram)
return checkpoint_all, chen_sqrtn, griewank, optimal
def plot_model(sols):
checkpoint_all, chen_sqrtn, griewank, optimal = sols
fig, axs = plt.subplots(2, 2, figsize=(8, 8))
plot_matrix(checkpoint_all.schedule_aux_data.R, axs[0, 0], title="Checkpoint all nodes")
    plot_matrix(chen_sqrtn.schedule_aux_data.R, axs[0, 1], title=r"Chen et al. ($\sqrt{n}$)")
    plot_matrix(griewank.schedule_aux_data.R, axs[1, 0], title=r"$\texttt{revolve}$ ($\log{n}$)")
plot_matrix(optimal.schedule_aux_data.R, axs[1, 1], title="Checkmate (optimal)")
plt.plot(fig=fig)
return fig
model = get_keras_model("VGG19")
# g = gen_linear_graph(16)
g = dfgraph_from_keras(model)
print(g.vfwd)
checkpoint_all, chen_sqrtn, griewank, optimal = solve_models(g)
fig = plot_model((checkpoint_all, chen_sqrtn, griewank, optimal))
fig.savefig('out.pdf', bbox_inches='tight')
for method in [checkpoint_all, chen_sqrtn, griewank, optimal]:
aux_data = method.schedule_aux_data
print(aux_data.cpu / 1e9, aux_data.activation_ram / 1e6)
!git lg
g
# Python and Numpy Exercises
The goal of this notebook is to help you consolidate the content we covered in the Python and Numpy class. We know we went through it rather quickly during the lecture, so the idea here is to practice the concepts so that you can use them later on in practice.
If you have any questions, feel free to ask on Slack or directly to us; we will be happy to help :)
## Python
In this Python part we will go through some of the main things you need to know. Of course we did not include everything that matters in the language, only the essentials.
### Variables
```
# Declare a variable called a and assign the value 10 to it
a = 10
# Print the variable you just created
a #print(a)
# Create another variable b that receives the value of a, but as a string
b = '%d' % a
# Combine your variable b with the variable below to obtain the string "Hello 10" in a variable d
c = "Hello "
d = c+b
# Print the variable d
d
```
### Strings
```
my_str = 'Insira uma frase aqui!'
# Replace the exclamation mark in the sentence with a question mark
# (Hint: does the function modify the string inplace or return a modified copy?)
my_str.replace('!', '?', 1)
# Create a list "my_words" with each word of the sentence
my_words = my_str.split(' ')
my_words
```
### Lists
```
lista = [1, 23, 31, 40, 56, 16]
# Write a for loop that prints each element of the list "lista" (remember that Python's for is a for-each)
for el in lista:
    print(el)
# Write a for loop that prints twice each element of the list "lista"
for el in lista:
    print(el*2)
# Generate a list called "dobro" with twice each element of "lista", using a list comprehension
dobro = [el*2 for el in lista]
dobro
# Create a new list called "pares"
# Write a for loop over the list "lista" and, for each odd element, append it to the end of the list "pares"
# Print the list "pares"
pares = [el for el in lista if el%2 != 0 ]
pares
lista2 = ['oi', 2, 2.5, 'top', 'python', 45]
# Loop over "lista2" and print every element that is a string (Hint: look up the type() function)
[el for el in lista2 if type(el) == str]
```
#### Indexing
```
my_list = [0, 10, 20, 30, 40, 50, 60, 70]
# Select the last element of the list
my_list[-1]
# Select from the first up to the 4th element of the list
my_list[0:4]
# Select from the second element of the list up to the fifth
my_list[1:5]
# Select from the first element of the list up to the second-to-last
my_list[:-1]
```
### Dictionaries
```
lista = ['a', 'a', 'b', 'a', 'c', 'd', 'e', 'b', 'b', 'c']
# Create a dictionary containing the count of each element of the list
my_dict = {el:0 for el in lista}
for el in lista:
    my_dict[el] += 1
#...
print(my_dict)
```
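For reference, the same count can be produced with `collections.Counter` from the standard library; a minimal sketch:
```
from collections import Counter
lista = ['a', 'a', 'b', 'a', 'c', 'd', 'e', 'b', 'b', 'c']
# Counter builds the element -> count mapping in a single call
print(dict(Counter(lista)))
```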
### Functions
```
# Create a function soma_elementos() that receives a list and returns the sum of all of its elements
def soma_elementos(lista):
    return sum(lista)
soma_elementos([1, 2, 3, 4, 5])
soma_elementos([-1, 5, 7, -2])
# Create a function produto_escalar() that receives two lists of equal length and computes the dot product between them
# Hint: use the zip function
def produto_escalar(lista1, lista2):
    result = 0
    for a, b in zip(lista1, lista2):
        result += a*b
    return result
produto_escalar([1, 2, 3], [0, 4, 7])
produto_escalar([10, 20, 40, 1], [23, 4, 2, 1])
# Create a function par_ou_impar() that receives a number n and, for each number from 1 to n, prints the number
# followed by "Par" (even) or "Impar" (odd), depending on which it is. If the user passes nothing, n must default to 20
# Example: par_ou_impar(4)
# 1 Impar
# 2 Par
# 3 Impar
# 4 Par
def par_ou_impar(n=20):
    for i in range(1, n+1):
        if i%2 == 0:
            print('%d Par' % i)
        else:
            print('%d Impar' % i)
par_ou_impar(4)
par_ou_impar(15)
# Create a function diga_indice() that receives a list and prints the index of each element followed by
# the element itself
# Example: diga_indice(['oi', 'tudo', 'bem'])
# 0 oi
# 1 tudo
# 2 bem
# (HINT: look up the enumerate function)
def diga_indice(lista):
    for idx, val in enumerate(lista):
        print(idx, val)
diga_indice(['1', '2', '3'])
diga_indice(['a', 'b', 'c', 'd', 'e'])
```
## Numpy
The central element of numpy is the array, so here we are going to practice many things about arrays.
```
# Importing the library
import numpy as np
```
### Arrays
```
a = np.array([1, 2, 3, 4, 5, 6, 7])
b = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
c = np.zeros((3,4))
# Think about what the shape of each of the arrays above is
# After thinking, print each one of them to check your answer
print(a.shape)
print(b.shape)
print(c.shape)
# Create a one-dimensional array with 20 random integer elements between 0 and 23
np.random.randint(0, 24, size=20)
# Create an array of ones with shape (4, 5)
np.ones((4, 5))
# Create an array of shape (4, 2) where every entry is 77
# (Hint: you may have to use a multiplication)
np.ones([4, 2]) * 77
# Generate an array called my_sequence with the numbers 0, 10, 20, 30, ..., 90, 100
my_sequence = np.arange(0, 101, 10)
my_sequence
```
### Indexing
```
my_array = np.random.randint(50, size=(15,))
print(my_array)
# Select all the elements from the fifth up to the eleventh (closed interval)
my_array[4:11]
# Select all the elements greater than 20
my_array[my_array > 20]
my_matrix = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
# Select the element in the first row of the third column
my_matrix[0][2]
# Select the element in the first row of the last column
my_matrix[0][-1]
# Select elements of the matrix to obtain the following
# [[6, 7],
#  [10, 11]]
my_matrix[1:3, 1:3]
# Select elements of the matrix to obtain the following
# [[2, 3, 4],
#  [6, 7, 8]]
my_matrix[0:2, -3:]
# Select the elements of the entire last column
my_matrix[:, [-1]]
# Select the elements of the entire 2nd row
my_matrix[1]
```
### Operations
```
my_array = np.random.randint(10, size=(5,))
print(my_array)
# Add 10 to every element of my_array
my_array + 10
# Multiply every element of my_array by 4
my_array * 4
# Get the sum of all elements of my_array
my_array.sum()
# Get the mean of all elements of my_array
my_array.mean()
# Get the index of the largest element of my_array
my_array.argmax()
my_array = np.random.randint(10, size=(5,))
my_other_array = np.random.randint(10, size=(5,))
print(my_array, '\n')
print(my_other_array)
my_array = np.random.randint(10, size=(5,))
my_other_array = np.random.randint(10, size=(10,5))
print(my_array, '\n')
print(my_other_array)
# Add my_array element-wise to each row of my_other_array (broadcasting)
my_other_array + my_array
my_array = np.random.randint(10, size=(5,4))
my_other_array = np.random.randint(10, size=(10,5))
print(my_array, '\n')
print(my_other_array)
# Compute the matrix multiplication between my_other_array and my_array
my_other_array @ my_array
# Find the sum of the values of each row of my_other_array
# (Hint: look up the axis argument of the sum function)
my_other_array.sum(axis=1)
my_array = np.random.randint(10, size=(5,4))
print(my_array)
# Using reshape, turn the matrix above into a vector (concatenating each row after the previous one)
my_array.reshape(-1)
np.array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15]])
# Generate the array above using np.arange and the reshape function
a = np.arange(16)
a.reshape([2,-1])
```
```
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
import os
import json
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from vits import commons
from vits import utils
from vits.data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
from vits.models import SynthesizerTrn
from vits.text.symbols import symbols
from vits.text import text_to_sequence
from scipy.io.wavfile import write
def get_text(text, hps):
text_norm = text_to_sequence(text, hps.data.text_cleaners)
if hps.data.add_blank:
text_norm = commons.intersperse(text_norm, 0)
text_norm = torch.LongTensor(text_norm)
return text_norm
```
## LJ Speech
```
hps = utils.get_hparams_from_file("./configs/ljs_base.json")
net_g = SynthesizerTrn(
len(symbols),
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
**hps.model).cuda()
_ = net_g.eval()
_ = utils.load_checkpoint("/path/to/pretrained_ljs.pth", net_g, None)
stn_tst = get_text("VITS is Awesome!", hps)
with torch.no_grad():
x_tst = stn_tst.cuda().unsqueeze(0)
x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()
audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))
```
## VCTK
```
hps = utils.get_hparams_from_file("./configs/vctk_base.json")
net_g = SynthesizerTrn(
len(symbols),
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model).cuda()
_ = net_g.eval()
_ = utils.load_checkpoint("/path/to/pretrained_vctk.pth", net_g, None)
stn_tst = get_text("VITS is Awesome!", hps)
with torch.no_grad():
x_tst = stn_tst.cuda().unsqueeze(0)
x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda()
sid = torch.LongTensor([4]).cuda()
audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))
```
### Voice Conversion
```
dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
collate_fn = TextAudioSpeakerCollate()
loader = DataLoader(dataset, num_workers=8, shuffle=False,
batch_size=1, pin_memory=True,
drop_last=True, collate_fn=collate_fn)
data_list = list(loader)
with torch.no_grad():
x, x_lengths, spec, spec_lengths, y, y_lengths, sid_src = [x.cuda() for x in data_list[0]]
sid_tgt1 = torch.LongTensor([1]).cuda()
sid_tgt2 = torch.LongTensor([2]).cuda()
sid_tgt3 = torch.LongTensor([4]).cuda()
audio1 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt1)[0][0,0].data.cpu().float().numpy()
audio2 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt2)[0][0,0].data.cpu().float().numpy()
audio3 = net_g.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt3)[0][0,0].data.cpu().float().numpy()
print("Original SID: %d" % sid_src.item())
ipd.display(ipd.Audio(y[0].cpu().numpy(), rate=hps.data.sampling_rate, normalize=False))
print("Converted SID: %d" % sid_tgt1.item())
ipd.display(ipd.Audio(audio1, rate=hps.data.sampling_rate, normalize=False))
print("Converted SID: %d" % sid_tgt2.item())
ipd.display(ipd.Audio(audio2, rate=hps.data.sampling_rate, normalize=False))
print("Converted SID: %d" % sid_tgt3.item())
ipd.display(ipd.Audio(audio3, rate=hps.data.sampling_rate, normalize=False))
```
<!--NAVIGATION-->
< [Layout and Styling of Jupyter widgets](06.00-Layout-and-Styling-Overview.ipynb) | [Contents](00.00-index.ipynb) | [OPTIONAL - Widget label styling](06.02-OPTIONAL-widget-label-styling.ipynb) >
# Layout and Styling of Jupyter widgets
This section shows how to lay out and style Jupyter interactive widgets to build rich and *reactive* widget-based applications.
Every Jupyter widget has two attributes for customizing its layout and styling: `layout` and `style`.
## The `style` attribute
The `style` attribute is used to expose non-layout-related styling attributes of a widget. For most widgets, the only style that can be modified is `description_width`, which is the width of the widget's description label.
However, a few widgets have additional style settings, as described below.
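For instance, widening `description_width` keeps a long description label from being truncated; a minimal sketch (the slider and its description text are just an illustration):
```
from ipywidgets import IntSlider
# 'initial' lets the description label take as much width as it needs
IntSlider(description='A rather long description', style={'description_width': 'initial'})
```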
### Style example
```
from ipywidgets import Button, ButtonStyle
b2 = Button(description='Custom color', style=dict(button_color='lightgreen'))
b2
b2.style.button_color = 'yellow'
```
You can get a list of the styling attributes of a widget with the `keys` property.
```
b2.style.keys
```
Just like the `layout` attribute, widget styles can be assigned to other widgets.
```
b3 = Button(description='Another button', style=b2.style)
b3
```
The styling attributes are specific to each widget type.
```
from ipywidgets import IntSlider
s1 = IntSlider(description='Blue handle')
s1.style.handle_color = 'lightblue'
s1
```
There is a [list of all of the style keys](Table_of_widget_keys_and_style_keys.ipynb#Style-keys).
#### The `button_style` and `bar_style` attributes
These attributes let us style certain widgets with predefined, theme-aware settings that affect both their background color and their text color. They are available for the widgets listed below. The available options for these styles are `success`, `info`, `warning` and `danger`; buttons also have a `primary` option. (A short `bar_style` sketch follows the button example below.)
- **button_style** is available for: Button, ToggleButton, ToggleButtons, FileUpload
- **bar_style** is available for: FloatProgress, IntProgress
```
b4 = Button(description='Yet another button')
b4
b4.button_style = 'warning'
```
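`bar_style` works the same way for the progress widgets; a minimal sketch:
```
from ipywidgets import IntProgress
p = IntProgress(value=70, min=0, max=100, description='Loading:')
p.bar_style = 'info'  # one of 'success', 'info', 'warning', 'danger' or ''
p
```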
Note that setting a button's `style` overrides `button_style`:
```
b4.style.button_color = 'red' # Makes the color red
b4.button_style = 'success' # Does not turn it green, because the color was explicitly set on this style
```
## The `layout` attribute: the foundation of widget layout
Jupyter interactive widgets have a `layout` attribute exposing a number of CSS properties that impact how widgets are laid out.
### The exposed CSS properties
<div class="alert alert-info" style="margin: 20px">
The following properties map to the values of the CSS properties of the same name (with '_' replaced by '-'), applied to the top-level DOM elements of the corresponding widget.
</div>
<details>
<summary><strong>Sizes</strong></summary>
- `height`
- `width`
- `max_height`
- `max_width`
- `min_height`
- `min_width`
</details>
<details>
<summary><strong>Display</strong></summary>
- `visibility`
- `display`
- `overflow`
</details>
<details>
<summary><strong>Box model</strong></summary>
- `border`
- `margin`
- `padding`
</details>
<details>
<summary><strong>Positioning</strong></summary>
- `top`
- `left`
- `bottom`
- `right`
</details>
<details>
<summary><strong>Image/media</strong></summary>
- `object_fit`
- `object_position`
</details>
<details>
<summary><strong>Flexbox</strong></summary>
- `order`
- `flex_flow`
- `align_items`
- `flex`
- `align_self`
- `align_content`
- `justify_content`
- `justify_items`
</details>
<details>
<summary><strong>Grid layout</strong></summary>
- `grid_auto_columns`
- `grid_auto_flow`
- `grid_auto_rows`
- `grid_gap`
- `grid_template_rows`
- `grid_template_columns`
- `grid_template_areas`
- `grid_row`
- `grid_column`
- `grid_area`
</details>
#### Shorthand CSS properties
You may have noticed that certain CSS properties such as `margin-[top/right/bottom/left]` seem to be missing. The same holds for `padding-[top/right/bottom/left]`, etc.
In fact, you can atomically specify the `[top/right/bottom/left]` margins via the `margin` attribute alone by passing the string `'100px 150px 100px 80px'` for a `top`, `right`, `bottom` and `left` margin of, respectively, `100`, `150`, `100` and `80` pixels.
Similarly, the `flex` attribute can hold values for `flex-grow`, `flex-shrink` and `flex-basis`. The `border` attribute is a shorthand property for `border-width`, `border-style (required)` and `border-color`.
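As a small illustration of the shorthand notation (this button is just an example, not part of the original tutorial), both the margin and the padding are set with a single four-value string:
```
from ipywidgets import Button, Layout
# order is top right bottom left for both margin and padding
Button(description='Spaced out button',
       layout=Layout(margin='10px 20px 10px 20px', padding='5px 15px 5px 15px'))
```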
### `Layout` example
The following example shows how to resize a `Button` so that its views have a height of `80px` and a width of `50%` of the available space. It also includes an example of setting a CSS property that requires multiple values (a border, in this case):
```
from ipywidgets import Button, Layout
b1 = Button(description='(50% width, 80px height) button',
layout=Layout(width='50%', height='80px', border='2px dotted blue'))
b1
```
The `layout` property can be shared between multiple widgets and assigned directly.
```
Button(description='Another button with the same layout', layout=b1.layout)
b1.layout.width = '30%'
```
#### Exercise
In the cell below, make the button's border solid and green, and make its width 70 pixels. (One possible solution is sketched after the cell.)
```
b1.layout. # fill this in; it may take more than one line
```
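One possible way to complete the exercise (a sketch, not the official solutions file):
```
b1.layout.border = '2px solid green'
b1.layout.width = '70px'
```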
## Widgets with CSS layouts
- ### Based on CSS Flexbox
The *Flexbox* CSS specification is great for laying out items in a single direction, horizontally or vertically. A two-dimensional layout can be achieved with flexbox by combining vertical and horizontal components, with some limitations. A notebook with more details about [widgets and the Flexbox model](reference_guides/guide-flex-box.ipynb) is available.
- **Box**: the main widget for creating Flexbox layouts
- **HBox**: widget with a horizontal Flexbox layout
- **VBox**: widget with a vertical Flexbox layout
- ### Based on CSS Grid
The *Grid* CSS specification is designed for two-dimensional layouts. There are properties to specify the number of items in each row or column, how large they should be, and how the items should be aligned.
A notebook with more details about [widgets and the Grid model](reference_guides/guide-grid-box.ipynb) is available.
- **GridBox**: the main widget for creating Grid layouts
- **TwoByTwoLayout**: a layout with a 2 x 2 grid
- **AppLayout**: an application-like layout with a header, a footer and sidebars
- **GridspecLayout**: an n x m grid layout
#### For more information about Flexbox and Grid
If you want to learn more about CSS layout after this tutorial, take a look at this [excellent set of articles on CSS layout at MDN](https://developer.mozilla.org/en-US/docs/Learn/CSS/CSS_layout). Both the Flexbox and the Grid articles end with links to more extensive guides.
## The Flexbox layout
The `HBox` and `VBox` classes are special cases of the `Box` widget.
The `Box` widget enables the entire CSS Flexbox spec, as well as the Grid layout spec, enabling rich, reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.
Again, the whole Flexbox spec is exposed via the `layout` attribute of the container widget (`Box`) and of the contained items. The same `layout` attribute can be shared among all contained items.
We will revisit more of the Flexbox spec later in this tutorial. For now, let's look at a few examples using `VBox` and `HBox`.
### The VBox and HBox helpers
The `VBox` and `HBox` helper classes give us simple defaults for arranging child widgets in vertical and horizontal boxes.
### HBox and VBox examples
Most of the core widgets have default heights and widths that tile well together. This allows simple layouts based on the `HBox` and `VBox` helpers to align naturally.
#### Four buttons in a VBox. Items stretch to the maximum width, in a vertical box taking `50%` of the available space.
```
from ipywidgets import Layout, Button, VBox
items_layout = Layout(width='auto') # overrides the button's default width to 'auto' so the button can grow
box_layout = Layout(border='solid',
width='50%')
words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=word, layout=items_layout, button_style='danger') for word in words]
box = VBox(children=items, layout=box_layout)
box
```
#### A reactive form
The form is a `VBox` of width '50%'. Each row in the VBox is an HBox that justifies the content with space between the elements.
Note that the labels and the interactive elements are nicely aligned.
```
from ipywidgets import Layout, HBox, VBox, Button, FloatText, Textarea, Dropdown, Label, IntSlider
# 'space-between' splits the whitespace evenly between the elements
form_item_layout = Layout(justify_content='space-between')
form_items = [
HBox([Label(value='Storage capacity'), IntSlider(min=4, max=512)], layout=form_item_layout),
HBox([Label(value='Egg style'),
Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout),
HBox([Label(value='Ship size'),
FloatText()], layout=form_item_layout),
HBox([Label(value='Information'),
Textarea()], layout=form_item_layout)
]
form = VBox(form_items, layout=Layout(
border='2px solid gray', padding='10px',
align_items='stretch', width='50%')
)
form
```
## The Grid layout
The `GridBox` class is a special case of the `Box` widget.
The entire Grid layout spec is exposed via the `layout` attribute of the container widget (`Box`) and of the contained items. The same `layout` attribute can be shared among all contained items.
This tutorial focuses on the higher-level layout options that are based on the Grid spec:
- **TwoByTwoLayout**: a layout with a 2 x 2 grid
- **AppLayout**: an application-like layout with a header, a footer and sidebars
- **GridspecLayout**: an m x n grid layout
A more detailed description of the [Grid layout is available](reference_guides/guide-grid-box.ipynb). The [Grid layout guide on MDN](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Grid_Layout#Guides) is also excellent.
## Grid layout template examples
#### Some setup
The cell below creates several buttons for use in the examples that follow.
```
# Utils widgets
from ipywidgets import Button, Layout, jslink, IntText, IntSlider
def create_expanded_button(description, button_style):
return Button(description=description, button_style=button_style,
layout=Layout(height='auto', width='auto'))
top_left_button = create_expanded_button("Top left", 'info')
top_right_button = create_expanded_button("Top right", 'success')
bottom_left_button = create_expanded_button("Bottom left", 'danger')
bottom_right_button = create_expanded_button("Bottom right", 'warning')
top_left_text = IntText(description='Top left', layout=Layout(width='auto', height='auto'))
top_right_text = IntText(description='Top right', layout=Layout(width='auto', height='auto'))
bottom_left_slider = IntSlider(description='Bottom left', layout=Layout(width='auto', height='auto'))
bottom_right_slider = IntSlider(description='Bottom right', layout=Layout(width='auto', height='auto'))
```
### TwoByTwoLayout
You can easily create a layout with 4 widgets arranged in a 2x2 grid using the `TwoByTwoLayout` widget:
**2x2 grid**
```
from ipywidgets import TwoByTwoLayout
TwoByTwoLayout(top_left=top_left_button,
top_right=top_right_button,
bottom_left=bottom_left_button,
bottom_right=bottom_right_button)
```
If you do not define a widget for some of the slots of the grid, the layout will automatically re-configure itself by merging neighbouring cells
```
TwoByTwoLayout(top_left=top_left_button,
bottom_left=bottom_left_button,
bottom_right=bottom_right_button)
```
You can pass `merge=False` to the `TwoByTwoLayout` constructor if you do not want this behavior.
```
layout_2x2 = TwoByTwoLayout(top_left=top_left_button,
bottom_left=bottom_left_button,
bottom_right=bottom_right_button,
merge=False)
layout_2x2
```
You can access the widgets in the grid:
```
layout_2x2.bottom_right.button_style = 'primary'
```
It is possible to add a missing widget even after the layout has been initialized:
```
layout_2x2.top_right = top_right_button
layout_2x2.grid_gap = '10px'
```
### bqplot Figure with linked sliders
You can easily create more complex layouts with custom widgets. For example, you can use a [bqplot](https://github.com/bqplot/bqplot) Figure widget to add plots:
```
import bqplot as bq
import numpy as np
size = 100
np.random.seed(0)
x_data = range(size)
y_data = np.random.randn(size)
y_data_2 = np.random.randn(size)
y_data_3 = np.cumsum(np.random.randn(size) * 100.)
x_ord = bq.OrdinalScale()
y_sc = bq.LinearScale()
bar = bq.Bars(x=np.arange(10), y=np.random.rand(10), scales={'x': x_ord, 'y': y_sc})
ax_x = bq.Axis(scale=x_ord)
ax_y = bq.Axis(scale=y_sc, tick_format='0.2f', orientation='vertical')
fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], padding_x=0.025, padding_y=0.025,
layout=Layout(width='auto', height='90%'))
from ipywidgets import FloatSlider
max_slider = FloatSlider(min=0, max=10, default_value=2, description="Max: ",
layout=Layout(width='auto', height='auto'))
min_slider = FloatSlider(min=-1, max=10, description="Min: ",
layout=Layout(width='auto', height='auto'))
app = TwoByTwoLayout(top_left=min_slider,
bottom_left=max_slider,
bottom_right=fig,
align_items="center",
height='400px')
jslink((y_sc, 'max'), (max_slider, 'value'))
jslink((y_sc, 'min'), (min_slider, 'value'))
jslink((min_slider, 'max'), (max_slider, 'value'))
jslink((max_slider, 'min'), (min_slider, 'value'))
max_slider.value = 1.5
app
```
## AppLayout
`AppLayout` is a widget layout template that allows you to create application-like widget arrangements. It consists of a header, a footer, two sidebars and a central pane:
```
from ipywidgets import AppLayout, Button, Layout
header_button = create_expanded_button('Header', 'success')
left_button = create_expanded_button('Left', 'info')
center_button = create_expanded_button('Center', 'warning')
right_button = create_expanded_button('Right', 'info')
footer_button = create_expanded_button('Footer', 'success')
AppLayout(header=header_button,
left_sidebar=left_button,
center=center_button,
right_sidebar=right_button,
footer=footer_button)
```
However, thanks to the automatic merging feature, many other layouts can be achieved:
```
AppLayout(header=header_button,
left_sidebar=left_button,
center=center_button,
right_sidebar=right_button,
footer=None)
AppLayout(header=header_button,
left_sidebar=left_button,
center=center_button,
right_sidebar=None,
footer=footer_button)
```
### Exercise
In the cell below, make an `AppLayout` with no sidebars.
```
# %load solutions/applayout-no-sides.py
AppLayout(header=header_button,
left_sidebar=None,
center=center_button,
right_sidebar=None,
footer=footer_button)
```
You can also modify the absolute and relative widths and heights of the panes using the `pane_widths` and `pane_heights` arguments. Both accept a sequence of three elements, each of which is either an integer (equivalent to the weight given to the row/column), a string in the format `'1fr'` (denoting one portion of the available free space), or `'100px'` (an absolute size).
```
app = AppLayout(header=header_button,
left_sidebar=left_button,
center=center_button,
right_sidebar=right_button,
footer=footer_button)
app
app.pane_widths = ['200px', 3, 1]
app.pane_widths = ['200px', '3fr', '1fr']
app.pane_heights = ['100px', 5, 1]
app.left_sidebar.description = 'New Left'
AppLayout(header=header_button,
left_sidebar=left_button,
center=center_button,
right_sidebar=right_button,
footer=footer_button,
pane_widths=[3, 3, 1],
pane_heights=[1, 5, '60px'])
```
### Exercise
Make an `AppLayout` that has a header but no footer and no right sidebar. Make the center the [bqplot with sliders demonstrated above](#bqplot-Figure-with-linked-sliders).
```
# %load solutions/slider-bqplot-sliders-app.py
container = AppLayout(header=header_button,
left_sidebar=left_button,
center=bqapp,
right_sidebar=None,
footer=None)
container
```
As an additional challenge, make the header button reset the sliders to their original positions using an event handler.
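A minimal sketch of such a handler, assuming the `header_button`, `min_slider` and `max_slider` widgets from the cells above; the reset values used here are just an example:
```
def reset_sliders(button):
    # restore the sliders to their starting positions
    min_slider.value = 0.0
    max_slider.value = 2.0
header_button.on_click(reset_sliders)
```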
## GridspecLayout
`GridspecLayout` is an M-by-N grid layout allowing for flexible layout definitions using an API similar to matplotlib's [GridSpec](https://matplotlib.org/tutorials/intermediate/gridspec.html#sphx-glr-tutorials-intermediate-gridspec-py).
You can use `GridspecLayout` to define a simple, regularly spaced grid. For example, to create a 4x3 layout:
**M x N grid of buttons**
```
from ipywidgets import GridspecLayout
grid = GridspecLayout(4, 3)
for i in range(4):
for j in range(3):
grid[i, j] = create_expanded_button('Button {} - {}'.format(i, j), 'warning')
grid
```
**Spanning rows and/or columns**
To make a widget span several columns and/or rows, you can use slice notation:
```
grid = GridspecLayout(4, 3)
grid[:3, 1:] = create_expanded_button('One', 'success')
grid[:, 0] = create_expanded_button('Two', 'info')
grid[3, 1] = create_expanded_button('Three', 'warning')
grid[3, 2] = create_expanded_button('Four', 'danger')
grid
```
It is still possible to change the properties of the widgets stored in the grid, using the same indexing notation.
```
grid[0, 0].description = "I am the blue one"
```
**Note**: it is enough to pass an index of one of the grid cells occupied by the widget of interest.
```
grid[3, 1] = create_expanded_button('New button!!', 'danger')
grid[:3, 1:] = create_expanded_button('I am new too!!!!!', 'warning')
grid[2, 2].description = 'A better label'
```
### Laying out a grid of scatter and bar plots
In this example, we demonstrate how to use the `GridspecLayout` and `bqplot` widgets to create a multipanel scatter plot. To run this example you will need to install the [bqplot](https://github.com/bqplot/bqplot) package.
```
import bqplot as bq
import numpy as np
from ipywidgets import GridspecLayout, Button, Layout
n_features = 3
data = np.random.randn(100, n_features)
data[:50, 2] += 4 * data[:50, 0] **2
data[50:, :] += 4
A = np.random.randn(n_features, n_features)/5
data = np.dot(data,A)
scales_x = [bq.LinearScale() for i in range(n_features)]
scales_y = [bq.LinearScale() for i in range(n_features)]
gs = GridspecLayout(n_features, n_features)
for i in range(n_features):
for j in range(n_features):
if i != j:
sc_x = scales_x[j]
sc_y = scales_y[i]
scatt = bq.Scatter(x=data[:, j], y=data[:, i], scales={'x': sc_x, 'y': sc_y})
gs[i, j] = bq.Figure(marks=[scatt], layout=Layout(width='auto', height='auto'),
fig_margin=dict(top=5, bottom=5, left=5, right=5), background_style={'fill':'#f5f5ff'})
else:
sc_x = scales_x[j]
sc_y = bq.LinearScale()
hist = bq.Hist(sample=data[:,i], scales={'sample': sc_x, 'count': sc_y})
gs[i, j] = bq.Figure(marks=[hist], layout=Layout(width='auto', height='auto'),
fig_margin=dict(top=5, bottom=5, left=5, right=5), background_style={'fill':'#f5f5ff'})
gs
```
## GridBox examples
### 3x3 grid with custom row & column sizes
```
from ipywidgets import Button, GridBox, Layout, ButtonStyle
GridBox(children=[Button(description=str(i), layout=Layout(width='auto', height='auto'),
style=ButtonStyle(button_color='darkseagreen')) for i in range(12)
],
layout=Layout(
width='50%',
grid_template_columns='100px 50px 100px',
grid_template_rows='80px auto 80px',
grid_gap='5px 10px')
)
```
### Exercises
**Add more buttons**
Modify the code above to place more buttons in the `GridBox` (do *not* modify the layout). Any number of buttons above 9 is fine.
1. What happens to the extra buttons? Are they laid out like the first 9 buttons?
The grid template defines a 3x3 grid. If additional children are placed in the grid, their properties are determined by the `grid_auto_columns`, `grid_auto_rows` and `grid_auto_flow` layout properties.
2. Set `grid_auto_rows="10px"` and run the example again with more than 9 buttons (a sketch follows this list).
3. Set `grid_auto_rows` so that the automatically added rows have the same format as the template rows.
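A sketch for item 2, reusing the 12-button `GridBox` from the example above (and its imports) with a few extra buttons; only `grid_auto_rows` is new:
```
GridBox(children=[Button(description=str(i), layout=Layout(width='auto', height='auto'),
                         style=ButtonStyle(button_color='darkseagreen')) for i in range(15)
                 ],
        layout=Layout(
            width='50%',
            grid_template_columns='100px 50px 100px',
            grid_template_rows='80px auto 80px',
            grid_auto_rows='10px',  # rows created beyond the 3x3 template are 10px tall
            grid_gap='5px 10px')
       )
```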
## An alternative way to define the grid, using named grid areas
The grid can also be set up using a word-based description of its areas. The layout below defines a grid with 4 columns and 3 rows. The first row is a header, the last row is a footer, and the middle row has content in the first two columns, then an empty cell, followed by a sidebar.
Widgets are assigned to each of these areas by setting the widget's `grid_area` layout attribute to the name of the area.
```
"header header header header"
"main main . sidebar "
"footer footer footer footer"
```
```
header = Button(description='Header',
layout=Layout(width='auto', height='auto', grid_area='header'),
style=ButtonStyle(button_color='lightblue'))
main = Button(description='Main',
layout=Layout(width='auto', height='auto', grid_area='main'),
style=ButtonStyle(button_color='moccasin'))
sidebar = Button(description='Sidebar',
layout=Layout(width='auto', height='auto', grid_area='sidebar'),
style=ButtonStyle(button_color='salmon'))
footer = Button(description='Footer',
layout=Layout(width='auto', height='auto', grid_area='footer'),
style=ButtonStyle(button_color='olive'))
GridBox(children=[header, main, sidebar, footer],
layout=Layout(
width='50%',
align_items='stretch',
grid_template_rows='auto auto auto',
grid_template_columns='25% 25% 25% 25%',
grid_template_areas='''
"header header header header"
"main main . sidebar"
"footer footer footer footer"
''')
)
```
### Exercises
#### Make the main area bigger
1. Add a row or two to the area template so that the main area is 3 rows high and 2 columns wide (a sketch of one possibility follows).
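A sketch of one possible template for this exercise, reusing the `header`, `main`, `sidebar` and `footer` buttons and the imports from the cells above; only the row template and the area strings change:
```
GridBox(children=[header, main, sidebar, footer],
        layout=Layout(
            width='50%',
            align_items='stretch',
            grid_template_rows='auto auto auto auto auto',
            grid_template_columns='25% 25% 25% 25%',
            grid_template_areas='''
            "header header header header"
            "main main . sidebar"
            "main main . ."
            "main main . ."
            "footer footer footer footer"
            ''')
       )
```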
<!--NAVIGATION-->
< [Layout and Styling of Jupyter widgets](06.00-Layout-and-Styling-Overview.ipynb) | [Contents](00.00-index.ipynb) | [OPTIONAL - Widget label styling](06.02-OPTIONAL-widget-label-styling.ipynb) >
```
import numpy as np
import pandas as pd
from copy import deepcopy # Needed to copy the data inside functions and avoid inplace changes to the data
# This is so that the functions receive a piece of data and produce a new one, keeping the original unchanged.
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE, SMOTENC
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler
DATA_PATH = '../dados/dados_treino.csv'
raw_data = pd.read_csv(DATA_PATH)
raw_data.info() # General information about the dataset. Lets us check whether there are NULL values.
raw_data.drop_duplicates().info()
raw_data.head()
# Reference dataset:
import sklearn.datasets
from sklearn import preprocessing
# Note: load_boston was deprecated and has been removed from recent scikit-learn releases
X,y = sklearn.datasets.load_boston(return_X_y=True)
print(type(X))
```
## NOTE: This class is still under construction and should be adapted before use! At the moment it is written practically as a pile of loose functions
```
class Preprocessor():
    '''
    Expected final output: a dictionary with the outputs
    '''
    # TODO: add the class attributes of interest:
    # especially the base dataframe and the working data
    def __init__(self):
        # TODO: add the class attributes of interest:
        # especially the input dataframe
        pass
    def _bool_to_int(self, dataframe: pd.DataFrame) -> pd.DataFrame:
        '''
        Converts the bool-typed columns to int type (0 and 1).
        '''
        columns = ['Tem_Instr_Violao_Viola', 'Tem_Instr_Guitarra', 'Tem_Instr_Cavaco',
                   'Tem_Instr_Sintetizador_Teclado', 'Tem_Instr_Piano', 'Tem_Instr_Metais',
                   'Tem_Instr_Madeiras', 'Tem_Instr_Cordas', 'gostou'] # Add the drum columns here later
        new_df = deepcopy(dataframe)
        for col in columns:
            new_df[col] = new_df[col].astype(int)
        return new_df
    def _bateria_to_bool(self, dataframe: pd.DataFrame) -> pd.DataFrame:
        '''
        Turns the 'bateria' (drums) column into binary indicator columns.
        This function can be modified to process the 'bateria' column in different ways.
        '''
        new_df = deepcopy(dataframe)
        new_df['bateria_eletronica'] = (new_df['bateria'] == 'Eletrônica').astype(int)
        new_df['bateria_acustica'] = (new_df['bateria'] == 'Acústica').astype(int)
        new_df['bateria_nenhuma'] = (new_df['bateria'] == 'Nenhuma').astype(int)
        new_df = new_df.drop(columns = ['bateria'])
        return new_df
    def _get_user_data(self, USER: str, raw_data: pd.DataFrame) -> pd.DataFrame:
        user_data = raw_data[raw_data['id_cliente'] == USER]
        user_data = self._bool_to_int(user_data)
        user_data = self._bateria_to_bool(user_data)
        user_data['modo'] = user_data['modo'].fillna("K")
        return user_data
def scale_to_bool(self,scale):
string = {
'K' : 1,
'c' : 0,
'c#' : 0,
'C' : 1,
'C#' : 1,
'd' : 0,
'd#' : 0,
'D' : 1,
'D#' : 1,
'e' : 0,
'E' : 1,
'f' : 0,
'f#' : 0,
'F' : 1,
'F#' : 1,
'g' : 0,
'g#' : 0,
'G' : 1,
'G#' : 1,
'a' : 0,
'a#' : 0,
'A' : 1,
'A#' : 1,
'b' : 0,
'B' : 1
}[scale]
return string
def scale_to_one_hot(self, raw_data: pd.DataFrame):
# One hot encoding the mode feature data
new_df = deepcopy(raw_data)
mode_df = pd.get_dummies(new_df["modo"])
new_df = new_df.drop(columns = ['modo'])
# new_df.merge(mode_df,how='left', on='duracao')
# Place the DataFrames side by side
new_df = pd.concat([new_df,mode_df], axis=1)
return new_df
def preprocess(self):
'''
Applies all the preprocessing strategies to match the structure expected by sklearn
'''
raw_data = pd.read_csv(DATA_PATH)
raw_data['PctCantada'] = raw_data['PctCantada'] / 100
raw_data['PctRap'] = raw_data['PctRap'] / 100
raw_data['duracao'] = raw_data['duracao'] / (60*1000)
raw_data['VolMedio'] = raw_data['VolMedio'].abs()
raw_data['duracao'] = raw_data['duracao'].abs()
raw_data['modo'] = raw_data['modo'].fillna('K')
raw_data['escala_maior'] = raw_data['modo'].apply((lambda mode: self.scale_to_bool(mode)))
# raw_data['modo'] = raw_data['modo'].apply((lambda mode: self.scale_to_bool(mode)))
raw_data = self._bool_to_int(raw_data)
raw_data = self._bateria_to_bool(raw_data)
raw_data = self.scale_to_one_hot(raw_data) # Not thrilled with this solution, but it does make sense
print(raw_data)
def filter_user(self, user):
user_data = raw_data[raw_data['id_cliente'] == user]
# TODO: define this interface better
def train_test_split(self): # TODO: add input parameters
input_data = user_data.drop(columns = ["data_curtida", "id_cliente"])[user_data['id_cliente'] == USER]
# X = (.to_numpy()
X = input_data.drop(columns = ["gostou"]).to_numpy()
Y = input_data["gostou"].to_numpy()
Y = Y.ravel()
print(Y.shape)
print(X.shape)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,random_state=109) # 80% training and 20% test
return X_train, X_test, y_train, y_test
def SMOTE_oversampling(self):
# TODO: add some useful input parameters
# Testing plain SMOTE
# sm = SMOTE(random_state=42)
# X_train_res, Y_train_res = sm.fit_resample(X_train, y_train)
a = np.arange(0,8)
b = np.array([11])
c = np.arange(15,44)
categorical_index = np.concatenate([a,c])
categorical_index = np.concatenate([categorical_index,b])
categorical_index = list(categorical_index)
# Testing SMOTE-NC
smote_nc_over = SMOTENC(categorical_features=categorical_index, random_state=0)
under = RandomUnderSampler(sampling_strategy='majority',random_state=0)
steps = [('o', smote_nc_over), ('u', under)]
pipeline = Pipeline(steps=steps)
X_train_res, Y_train_res = pipeline.fit_resample(X_train, y_train)
# X_train_res, Y_train_res = smote_nc_over.fit_resample(X_train, y_train)
def check_balancing(self):
## Count elements
count_plus = 0
count_minus = 0
for y_val in Y_train_res:
if y_val == 1:
count_plus +=1
else:
count_minus +=1
print("Positive examples:",count_plus)
print("Negative examples:",count_minus)
# Maybe create a method that applies all the steps to avoid repeated code -> But this should be the only dataset we need, so there should be no extra problems
# enc = preprocessing.OneHotEncoder()
# X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']]
# enc.fit(X)
# transform = enc.transform([['female', 'from US', 'uses Safari'],
# ['male', 'from Europe', 'uses Safari']]).toarray()
# print(X)
# print(transform)
columns = ['Tem_Instr_Violao_Viola', 'Tem_Instr_Guitarra', 'Tem_Instr_Cavaco',
'Tem_Instr_Sintetizador_Teclado', 'Tem_Instr_Piano', 'Tem_Instr_Metais',
'Tem_Instr_Madeiras', 'Tem_Instr_Cordas', 'c' ,'K' ,'c#','C' ,'C#','d' ,'d#','D' ,'D#','e' ,'E' ,'f' ,'f#','F' ,'F#','g' ,'g#','G' ,'G#','a' ,'a#','A' ,'A#','B' ,'b']
# methods already implemented (module-level versions)
def bool_to_int(dataframe: pd.DataFrame) -> pd.DataFrame:
'''
Converts the bool columns to int (0 and 1).
'''
columns = ['Tem_Instr_Violao_Viola', 'Tem_Instr_Guitarra', 'Tem_Instr_Cavaco',
'Tem_Instr_Sintetizador_Teclado', 'Tem_Instr_Piano', 'Tem_Instr_Metais',
'Tem_Instr_Madeiras', 'Tem_Instr_Cordas', 'gostou'] # TODO: add the drum (bateria) columns here later
new_df = deepcopy(dataframe)
for col in columns:
new_df[col] = new_df[col].astype(int)
return new_df
def bateria_to_bool(dataframe: pd.DataFrame) -> pd.DataFrame:
'''
Turns the 'bateria' (drums) column into binary indicator columns.
This function can be modified to process the 'bateria' column in different ways.
'''
new_df = deepcopy(dataframe)
new_df['bateria_eletronica'] = (new_df['bateria'] == 'Eletrônica').astype(int)
new_df['bateria_acustica'] = (new_df['bateria'] == 'Acústica').astype(int)
new_df['bateria_nenhuma'] = (new_df['bateria'] == 'Nenhuma').astype(int)
new_df = new_df.drop(columns = ['bateria'])
return new_df
def get_user_data(USER: str, raw_data: pd.DataFrame) -> pd.DataFrame:
user_data = raw_data[raw_data['id_cliente'] == USER]
user_data = bool_to_int(user_data)
user_data = bateria_to_bool(user_data)
user_data['modo'] = user_data['modo'].fillna("K")
return user_data
# methods from the exploratory analysis
def scale_to_bool(scale):
string = {
'K' : 1,
'c' : 0,
'c#' : 0,
'C' : 1,
'C#' : 1,
'd' : 0,
'd#' : 0,
'D' : 1,
'D#' : 1,
'e' : 0,
'E' : 1,
'f' : 0,
'f#' : 0,
'F' : 1,
'F#' : 1,
'g' : 0,
'g#' : 0,
'G' : 1,
'G#' : 1,
'a' : 0,
'a#' : 0,
'A' : 1,
'A#' : 1,
'b' : 0,
'B' : 1
}[scale]
return string
# One hot encoding the mode feature data:
def scale_to_one_hot(raw_data: pd.DataFrame):
new_df = deepcopy(raw_data)
mode_df = pd.get_dummies(new_df["modo"])
new_df = new_df.drop(columns = ['modo'])
# new_df.merge(mode_df,how='left', on='duracao')
# Place the DataFrames side by side
new_df = pd.concat([new_df,mode_df], axis=1)
return new_df
raw_data = pd.read_csv(DATA_PATH)
raw_data['PctCantada'] = raw_data['PctCantada'] / 100
raw_data['PctRap'] = raw_data['PctRap'] / 100
raw_data['duracao'] = raw_data['duracao'] / (60*1000)
raw_data['VolMedio'] = raw_data['VolMedio'].abs()
raw_data['duracao'] = raw_data['duracao'].abs()
raw_data['modo'] = raw_data['modo'].fillna('K')
raw_data['escala_maior'] = raw_data['modo'].apply((lambda mode: scale_to_bool(mode)))
# raw_data['modo'] = raw_data['modo'].apply((lambda mode: scale_to_bool(mode)))
raw_data = bool_to_int(raw_data)
raw_data = bateria_to_bool(raw_data)
raw_data = scale_to_one_hot(raw_data) # Not thrilled with this solution, but it does make sense
print(raw_data)
USER = '#ID9181'
# USER = '#ID4940'
user_data = raw_data[raw_data['id_cliente'] == USER]
input_data = user_data.drop(columns = ["data_curtida", "id_cliente"])[user_data['id_cliente'] == USER]
# X = (.to_numpy()
X = input_data.drop(columns = ["gostou"]).to_numpy()
Y = input_data["gostou"].to_numpy()
Y = Y.ravel()
print(Y.shape)
print(X.shape)
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,random_state=109) # 80% training and 20% test
# Testing plain SMOTE
# sm = SMOTE(random_state=42)
# X_train_res, Y_train_res = sm.fit_resample(X_train, y_train)
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler
a = np.arange(0,8)
b = np.array([11])
c = np.arange(15,44)
categorical_index = np.concatenate([a,c])
categorical_index = np.concatenate([categorical_index,b])
categorical_index = list(categorical_index)
# Testing SMOTE-NC
smote_nc_over = SMOTENC(categorical_features=categorical_index, random_state=0)
under = RandomUnderSampler(sampling_strategy='majority',random_state=0)
steps = [('o', smote_nc_over), ('u', under)]
pipeline = Pipeline(steps=steps)
X_train_res, Y_train_res = pipeline.fit_resample(X_train, y_train)
# X_train_res, Y_train_res = smote_nc_over.fit_resample(X_train, y_train)
# Check whether the dataset is now balanced
count_plus = 0
count_minus = 0
for y_val in Y_train_res:
if y_val == 1:
count_plus +=1
else:
count_minus +=1
print("Positive examples:",count_plus)
print("Negative examples:",count_minus)
```
## Testing a classifier to check that the dataset structure is correct
```
clf = DecisionTreeClassifier()
# clf.fit(X_train, y_train) # Without SMOTE
clf.fit(X_train_res, Y_train_res) # With SMOTE
y_pred = clf.predict(X_test)
from sklearn import metrics
# Model Accuracy: how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred))
# Model Recall: what percentage of positive tuples are labelled as such?
print("Recall:",metrics.recall_score(y_test, y_pred))
metrics.f1_score(y_test, y_pred)
```
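Accuracy alone can be misleading on an imbalanced dataset like this one, so a confusion matrix is a useful extra check. This is a small optional addition, not part of the original notebook:
```
from sklearn.metrics import confusion_matrix
# Rows are true classes, columns are predicted classes:
# [[TN, FP],
#  [FN, TP]]
print(confusion_matrix(y_test, y_pred))
```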
## TODO: plot the ROC curve and accuracy to compare the results
```
raw_data[raw_data['id_cliente'] == '#ID4940']['gostou'].value_counts() # Check whether the dataset is imbalanced.
def plot_auc(y_test, y_pred):
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred, pos_label=1)
plt.plot(fpr, tpr, color='red', lw=2)
plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
return
plot_auc(y_test, y_pred)
```
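One way to tackle the TODO above: the ROC curve is more informative when computed from predicted probabilities rather than hard class labels (with labels it collapses to a single intermediate point). Below is a rough sketch, assuming `clf`, `X_test`, `y_test`, `metrics`, and `plt` from the cells above are still in scope; `y_scores` and `roc_auc` are just illustrative names:
```
# Sketch: ROC curve and AUC from predicted probabilities
y_scores = clf.predict_proba(X_test)[:, 1] # probability of the positive class
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_scores, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, color='red', lw=2, label='AUC = {:.3f}'.format(roc_auc))
plt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve (predicted probabilities)')
plt.legend()
plt.show()
```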
# Table of Contents
1. Create an interactive map
2. Add basemaps
3. Add WMS and XYZ tile layers
4. Add Earth Engine data layers
5. Search Earth Engine data catalog
6. Search Earth Engine API documentation
7. Use Inspector tool
8. Use Plotting tool
9. Create a split-panel map
10. Add marker cluster
11. Add customized legends
12. Use Drawing tools
13. Convert JavaScripts to Python
14. Use shapefiles
15. Create Landsat timelapse
16. Use time-series inspector
17. Export images
```
import ee
import geemap
```
## Create an interactive map
```
Map = geemap.Map(center=(40, -100), zoom=4)
Map
```
## Add basemaps
```
Map = geemap.Map()
Map
Map.add_basemap('HYBRID')
Map.add_basemap('OpenTopoMap')
Map = geemap.Map()
Map.basemap_demo()
Map
```
## Add WMS and XYZ tile layers
```
Map = geemap.Map()
Map
# https://viewer.nationalmap.gov/services/
url = 'https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}'
Map.add_tile_layer(url, name='Google Satellite', attribution='Google')
naip_url = 'https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?'
Map.add_wms_layer(url=naip_url, layers='0', name='NAIP Imagery', format='image/png', shown=True)
```
## Add Earth Engine data layers
```
Map = geemap.Map()
Map
# Add Earth Engine dataset
dem = ee.Image('USGS/SRTMGL1_003')
landcover = ee.Image("ESA/GLOBCOVER_L4_200901_200912_V2_3").select('landcover')
landsat7 = ee.Image('LE7_TOA_5YEAR/1999_2003')
states = ee.FeatureCollection("TIGER/2018/States")
# Set visualization parameters.
vis_params = {
'min': 0,
'max': 4000,
'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}
# Add Earth Engine layers to the map
Map.addLayer(dem, vis_params, 'SRTM DEM', True, 0.5)
Map.addLayer(landcover, {}, 'Land cover')
Map.addLayer(landsat7, {'bands': ['B4', 'B3', 'B2'], 'min': 20, 'max': 200}, 'Landsat 7')
Map.addLayer(states, {}, "US States")
```
## Search Earth Engine data catalog
```
Map = geemap.Map()
Map
Map.search_locations
Map.search_loc_geom
location = Map.search_loc_geom
print(location.getInfo())
```
## Search Earth Engine API documentation
```
geemap.ee_search()
```
## Use Inspector tool
```
Map = geemap.Map()
# Add Earth Engine dataset
dem = ee.Image('USGS/SRTMGL1_003')
landcover = ee.Image("ESA/GLOBCOVER_L4_200901_200912_V2_3").select('landcover')
landsat7 = ee.Image('LE7_TOA_5YEAR/1999_2003')
states = ee.FeatureCollection("TIGER/2018/States")
# Set visualization parameters.
vis_params = {
'min': 0,
'max': 4000,
'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}
# Add Earth Engine layers to the map
Map.addLayer(dem, vis_params, 'SRTM DEM', True, 0.5)
Map.addLayer(landcover, {}, 'Land cover')
Map.addLayer(landsat7, {'bands': ['B4', 'B3', 'B2'], 'min': 20, 'max': 200}, 'Landsat 7')
Map.addLayer(states, {}, "US States")
Map
```
## Use Plotting tool
```
Map = geemap.Map()
landsat7 = ee.Image('LE7_TOA_5YEAR/1999_2003') \
.select([0, 1, 2, 3, 4, 6])
landsat_vis = {
'bands': ['B4', 'B3', 'B2'],
'gamma': 1.4
}
Map.addLayer(landsat7, landsat_vis, "LE7_TOA_5YEAR/1999_2003")
hyperion = ee.ImageCollection('EO1/HYPERION') \
.filter(ee.Filter.date('2016-01-01', '2017-03-01'))
hyperion_vis = {
'min': 1000.0,
'max': 14000.0,
'gamma': 2.5,
}
Map.addLayer(hyperion, hyperion_vis, 'EO1/HYPERION')
Map
Map.set_plot_options(plot_type='bar', add_marker_cluster=True)
```
## Create a split-panel map
```
Map = geemap.Map()
Map.split_map(left_layer='HYBRID', right_layer='ROADMAP')
Map
Map = geemap.Map()
Map.split_map(left_layer='NLCD 2016 CONUS Land Cover', right_layer='NLCD 2001 CONUS Land Cover')
Map
nlcd_2001 = ee.Image('USGS/NLCD/NLCD2001').select('landcover')
nlcd_2016 = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
left_layer = geemap.ee_tile_layer(nlcd_2001, {}, 'NLCD 2001')
right_layer = geemap.ee_tile_layer(nlcd_2016, {}, 'NLCD 2016')
Map = geemap.Map()
Map.split_map(left_layer, right_layer)
Map
```
## Add marker cluster
```
import geemap
import json
import os
import requests
from geemap import geojson_to_ee, ee_to_geojson
from ipyleaflet import GeoJSON, Marker, MarkerCluster
Map = geemap.Map()
Map
file_path = os.path.join(os.getcwd(), 'us-cities.json')
if not os.path.exists(file_path):
url = 'https://github.com/giswqs/geemap/raw/master/examples/data/us-cities.json'
r = requests.get(url)
with open(file_path, 'w') as f:
f.write(r.content.decode("utf-8"))
with open(file_path) as f:
json_data = json.load(f)
marker_cluster = MarkerCluster(
markers=[Marker(location=feature['geometry']['coordinates'][::-1]) for feature in json_data['features']],
name = 'Markers')
Map.add_layer(marker_cluster)
ee_fc = geojson_to_ee(json_data)
Map.addLayer(ee_fc, {}, "US Cities EE")
```
## Add customized legends
```
Map = geemap.Map()
Map.add_basemap('HYBRID')
landcover = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
Map.addLayer(landcover, {}, 'NLCD Land Cover')
Map.add_legend(builtin_legend='NLCD')
Map
Map = geemap.Map()
Map.add_basemap('HYBRID')
Map.add_basemap('FWS NWI Wetlands')
Map.add_legend(builtin_legend='NWI')
Map
Map = geemap.Map()
legend_dict = {
'11 Open Water': '466b9f',
'12 Perennial Ice/Snow': 'd1def8',
'21 Developed, Open Space': 'dec5c5',
'22 Developed, Low Intensity': 'd99282',
'23 Developed, Medium Intensity': 'eb0000',
'24 Developed High Intensity': 'ab0000',
'31 Barren Land (Rock/Sand/Clay)': 'b3ac9f',
'41 Deciduous Forest': '68ab5f',
'42 Evergreen Forest': '1c5f2c',
'43 Mixed Forest': 'b5c58f',
'51 Dwarf Scrub': 'af963c',
'52 Shrub/Scrub': 'ccb879',
'71 Grassland/Herbaceous': 'dfdfc2',
'72 Sedge/Herbaceous': 'd1d182',
'73 Lichens': 'a3cc51',
'74 Moss': '82ba9e',
'81 Pasture/Hay': 'dcd939',
'82 Cultivated Crops': 'ab6c28',
'90 Woody Wetlands': 'b8d9eb',
'95 Emergent Herbaceous Wetlands': '6c9fb8'
}
landcover = ee.Image('USGS/NLCD/NLCD2016').select('landcover')
Map.addLayer(landcover, {}, 'NLCD Land Cover')
Map.add_legend(legend_title="NLCD Land Cover Classification", legend_dict=legend_dict)
Map
# https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
Map = geemap.Map()
ee_class_table = """
Value Color Description
0 1c0dff Water
1 05450a Evergreen needleleaf forest
2 086a10 Evergreen broadleaf forest
3 54a708 Deciduous needleleaf forest
4 78d203 Deciduous broadleaf forest
5 009900 Mixed forest
6 c6b044 Closed shrublands
7 dcd159 Open shrublands
8 dade48 Woody savannas
9 fbff13 Savannas
10 b6ff05 Grasslands
11 27ff87 Permanent wetlands
12 c24f44 Croplands
13 a5a5a5 Urban and built-up
14 ff6d4c Cropland/natural vegetation mosaic
15 69fff8 Snow and ice
16 f9ffa4 Barren or sparsely vegetated
254 ffffff Unclassified
"""
landcover = ee.Image('MODIS/051/MCD12Q1/2013_01_01') \
.select('Land_Cover_Type_1')
Map.setCenter(6.746, 46.529, 2)
Map.addLayer(landcover, {}, 'MODIS Land Cover')
legend_dict = geemap.legend_from_ee(ee_class_table)
Map.add_legend(legend_title="MODIS Global Land Cover", legend_dict=legend_dict)
Map
```
## Use Drawing tools
```
Map = geemap.Map()
Map
# Add Earth Engine dataset
image = ee.Image('USGS/SRTMGL1_003')
# Set visualization parameters.
vis_params = {
'min': 0,
'max': 4000,
'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5']}
# Add Earth Engine DEM to map
Map.addLayer(image, vis_params, 'SRTM DEM')
states = ee.FeatureCollection("TIGER/2018/States")
Map.addLayer(states, {}, 'US States')
Map.draw_features
```
## Convert JavaScripts to Python
You can simply copy and paste your GEE JavaScript code into a block wrapped in triple quotes and assign it to a variable.
For example, you can grab GEE JavaScript snippets from the [GEE Documentation](https://developers.google.com/earth-engine/image_visualization).
```
js_snippet = """
// Load an image.
var image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318');
// Define the visualization parameters.
var vizParams = {
bands: ['B5', 'B4', 'B3'],
min: 0,
max: 0.5,
gamma: [0.95, 1.1, 1]
};
// Center the map and display the image.
Map.setCenter(-122.1899, 37.5010, 10); // San Francisco Bay
Map.addLayer(image, vizParams, 'false color composite');
"""
geemap.js_snippet_to_py(js_snippet, add_new_cell=True, import_ee=True, import_geemap=True, show_map=True)
import ee
import geemap
Map = geemap.Map()
ee.Initialize()
# Load an image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Define the visualization parameters.
vizParams = {
'bands': ['B5', 'B4', 'B3'],
'min': 0,
'max': 0.5,
'gamma': [0.95, 1.1, 1]
}
# Center the map and display the image.
Map.setCenter(-122.1899, 37.5010, 10); # San Francisco Bay
Map.addLayer(image, vizParams, 'False color composite')
Map
js_snippet = """
// Load an image.
var image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318');
// Create an NDWI image, define visualization parameters and display.
var ndwi = image.normalizedDifference(['B3', 'B5']);
var ndwiViz = {min: 0.5, max: 1, palette: ['00FFFF', '0000FF']};
Map.addLayer(ndwi, ndwiViz, 'NDWI', false);
"""
geemap.js_snippet_to_py(js_snippet)
import ee
import geemap
Map = geemap.Map()
ee.Initialize()
# Load an image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
# Create an NDWI image, define visualization parameters and display.
ndwi = image.normalizedDifference(['B3', 'B5'])
ndwiViz = {'min': 0.5, 'max': 1, 'palette': ['00FFFF', '0000FF']}
Map.addLayer(ndwi, ndwiViz, 'NDWI', False)
Map
```
## Use shapefiles
```
Map = geemap.Map()
Map
countries_shp = '../data/countries.shp'
countries = geemap.shp_to_ee(countries_shp)
Map.addLayer(countries, {}, 'Countries')
states_shp = '../data/us-states.shp'
states = geemap.shp_to_ee(states_shp)
Map.addLayer(states, {}, 'US States')
cities_shp = '../data/us-cities.shp'
cities = geemap.shp_to_ee(cities_shp)
Map.addLayer(cities, {}, 'US Cities')
geemap.ee_to_shp(countries, filename='../data/countries_new.shp')
geemap.ee_export_vector(states, filename='../data/states.csv')
```
## Create Landsat timelapse
```
Map = geemap.Map()
Map
label = 'Urban Growth in Las Vegas'
Map.add_landsat_ts_gif(label=label, start_year=1985, bands=['Red', 'Green', 'Blue'], font_color='white', frames_per_second=10, progress_bar_color='blue')
```
## Use time-series inspector
```
naip_ts = geemap.naip_timeseries(start_year=2009, end_year=2018)
layer_names = ['NAIP ' + str(year) for year in range(2009, 2019)]
print(layer_names)
naip_vis = {'bands': ['N', 'R', 'G']}
Map = geemap.Map()
Map.ts_inspector(left_ts=naip_ts, right_ts=naip_ts, left_names=layer_names, right_names=layer_names, left_vis=naip_vis, right_vis=naip_vis)
Map
```
## Export images
```
Map = geemap.Map()
Map
image = ee.Image('LE7_TOA_5YEAR/1999_2003')
landsat_vis = {
'bands': ['B4', 'B3', 'B2'],
'gamma': 1.4
}
Map.addLayer(image, landsat_vis, "LE7_TOA_5YEAR/1999_2003", True, 0.7)
# Draw any shapes on the map using the Drawing tools before executing this code block
feature = Map.draw_last_feature
roi = feature.geometry()
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
filename = os.path.join(out_dir, 'landsat.tif')
geemap.ee_export_image(image, filename=filename, scale=90, region=roi, file_per_band=False)
geemap.ee_export_image(image, filename=filename, scale=90, region=roi, file_per_band=True)
loc = ee.Geometry.Point(-99.2222, 46.7816)
collection = ee.ImageCollection('USDA/NAIP/DOQQ') \
.filterBounds(loc) \
.filterDate('2008-01-01', '2020-01-01') \
.filter(ee.Filter.listContains("system:band_names", "N"))
out_dir = os.path.join(os.path.expanduser('~'), 'Downloads')
geemap.ee_export_image_collection(collection, out_dir=out_dir)
```
# Data Wrangling with Python
Fall 2021
In this workshop, we'll dive deep into some techniques for cleaning and re-shaping data in Python using the [pandas](https://pandas.pydata.org/docs/) library.
Here's what you can expect to practice:
- working with CSV data from various sources
- reshaping data for analysis
- joining datasets on common elements
- handling text and time series data
- dealing with nulls and duplicates
## Research question
Calculate change in US home prices over a five-year period, using the Zillow Home Value Index, and compare with change in median household income. Identify regions where the two measures diverge.
## Data sources
- [Zillow Home Value Index](https://www.zillow.com/research/data/): "A smoothed, seasonally adjusted measure of the typical home value and market changes across a given region and housing type. It reflects the typical value for homes in the 35th to 65th percentile range."
- [US Census, American Community Survey](https://www.census.gov/programs-surveys/acs) data for median household income over the previous 12 months.
## Setting up
### Library imports
Let's import any libraries we'll need.
We'll do most of our work in `pandas`, which should be available automatically in a Google Colab environment.
```
import pandas as pd
```
We'll also use a library that simplifies the process of retrieving Census data. We may need to install it first.
```
!pip install census==0.8.17
from census import Census
```
### Links & API registration
At this time, you should also [register for an API key](https://api.census.gov/data/key_signup.html) so as to be able to retrieve datasets from census.gov. Once you have completed the form, you should receive your API key at the email address you provided.
The following link will allow us to download Zillow Home Value Index (ZHVI) data. The data covers the time period between January 2000 and August 2021. Values are aggregated at the level of the zip code.
```
zhvi_all_homes = 'https://files.zillowstatic.com/research/public_csvs/zhvi/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_month.csv?t=1631541070'
```
## Loading & exploring data
### Loading data from CSV
The pandas `read_csv` method can load data from a URL as well as a file, provided the data is in the proper format.
```
zhvi_df = pd.read_csv(zhvi_all_homes)
```
### Reshaping data
If we look at the DataFrame's columns, we can see that each month is represented by a discrete column heading, leading to a very wide table. This is done to compress the data to save storage space. (The names of the months appear only once, in the column headings.)
```
zhvi_df.columns
```
The table has 30K+ rows, one row per zip code.
```
len(zhvi_df)
```
For analysis, it's often easier to work with data in so-called "long" format, meaning that each row corresponds to a single observation.
An observation is a specific measure of one or more variables. In this dataset, we have basically two variables: time and location (zip code). So how can we re-shape this data so that each row contains a single value for ZHVI?
pandas has a handy method called `melt`, which is useful when you have several columns that all contain the same **kind** of measure. Here, each of the month columns contains the same measure, i.e., the ZHVI values for that month. So we can "melt" this table so that all the ZHVI values are in one column, and all the months in another.
And what about the geographical columns? We will retain those, making them what pandas calls "ID variables" -- the idea being that the combination of those columns creates a set of unique identifiers. In this case, that's the geographical location identified by zip code.
In the following method call, I slice the DataFrame's `columns` object to refer to the geographical columns by position, which works because they all precede the month columns.
```
z_df = zhvi_df.melt(id_vars=zhvi_df.columns[0:9], var_name='month', value_name='value')
```
Now our dataset is much bigger, though it has exactly the same data! But the new shape will make it easier to group our data and filter it in different ways.
```
len(z_df)
```
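To see what `melt` does on a small scale, here is a toy example (the frame and its numbers are made up purely for illustration) that turns two month columns into a single `month`/`value` pair per row:
```
toy = pd.DataFrame({'RegionName': ['02139', '60616'],
                    '2021-01-31': [650000, 250000],
                    '2021-02-28': [655000, 251000]})
toy.melt(id_vars=['RegionName'], var_name='month', value_name='value')
```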
### Correcting data types
It's useful to make sure the datatypes in your DataFrame make sense for what they represent. In this case, our zip codes should be strings, our values floats, and our months datetime objects.
The `dtypes` property displays the type of each column. In pandas, a type of `object` is either for a column of strings or of mixed data types.
```
z_df.dtypes
```
#### Padding strings
Let's convert the zip code fields to strings. One thing you might notice is that by treating the zip code (`RegionName`) as an integer, pandas has truncated zip codes that begin with zero. That could pose problems later, if we want to match this data against other data using the zip code.
We can fix that by converting the `RegionName` column to a string and using the Python string method `zfill`, which adds zeroes to the beginning of a string to make it the required length. The argument to `zfill` is the **total number** of characters in the string. If the string to which you apply `zfill` contains fewer characters than the number you provide, the string will be padded with zeroes to fill it out to the required length. For example, `'7'.zfill(3)` returns `'007'`.
To use `zfill` on the `RegionName` column of our DataFrame, we first have to convert the data to strings, using the `astype` method. Then we can `apply` the `zfill` method to the data in that column. The pandas `apply` method, which takes as its argument another function, executes that function once for each value in the column.
```
z_df['RegionName'] = z_df['RegionName'].astype(str).apply(lambda x: x.zfill(5))
```
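As an aside, pandas also exposes vectorized string methods through the `.str` accessor, so the same padding can be done without `apply`; the following line is equivalent to the one above:
```
# Equivalent, vectorized version using the .str accessor
z_df['RegionName'] = z_df['RegionName'].astype(str).str.zfill(5)
```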
#### Converting dates and times
Now let's convert the `month` column to a `datetime` type. This makes it much easier to aggregate on time series. In this case, pandas can interpret the string correctly without our supplying a pattern. In other cases, it may be necessary to provide a second argument to `pd.to_datetime`, indicating the pattern of the string.
```
z_df['month'] = pd.to_datetime(z_df['month'])
```
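For example, if the months had been stored in a non-standard form such as `'01/2021'`, we could pass the pattern explicitly (not needed for this dataset):
```
# Example of supplying an explicit format string
pd.to_datetime('01/2021', format='%m/%Y')
```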
Now we can filter by parts of the date, for instance, by year. To do this, we use the special `dt` attribute, which has attributes corresponding to day, month, and year.
```
z_df.loc[z_df.month.dt.year == 2021]
```
#### Customizing the numerical format
When dealing with dollar values in the millions, it is useful to limit the number of decimal places to 2 and to add commas separating the thousands, etc. We can set this option globally as follows.
Note that it does not affect the underlying representation of the data (which is still floating-point decimal), just the way it displays on screen.
```
pd.set_option('display.float_format','{:,.2f}'.format)
z_df
```
### Exploring the data
#### Null values
One thing we may want to know about a dataset is how many rows have null values. There are other ways to accomplish this, but a straightforward way to get a total by column is to call `sum` on the result of the `DataFrame.isna` method. `isna` returns a new DataFrame where each cell is either `True` or `False`, depending on whether it's null or not. And the `sum` method simply "adds up" these Boolean values, treating `True` as 1 and `False` as 0. The result shows us how many null values are in each column.
```
z_df.isna().sum()
```
We can see that the only columns with nulls are the `Metro` column and the `value` column. The `Metro` column refers to the zip code's metro area; rural zip codes would not have a metro area, so that makes sense. The nulls in the `value` column might be more problematic, depending on our analysis; these are instances where we don't have a valid observation. (Maybe no data were available.)
Here we also have an example of the principle that an aggregate operation on a DataFrame (`sum`) returns a pandas Series.
If we want to examine the null values, we can use the `isna` method to find them.
```
z_df.loc[z_df.value.isna()]
```
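Depending on the analysis, one common option is simply to drop the rows that have no ZHVI value; whether that is appropriate depends on the question being asked (`z_df_clean` below is just an illustrative name):
```
# One option: drop rows with a null ZHVI value
z_df_clean = z_df.dropna(subset=['value'])
len(z_df), len(z_df_clean)
```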
#### Checking for duplicates
Another problem frequently encountered is duplicated data. We can check for that using the `duplicated` method, which works like `isna` except that it returns `True` if a value is duplicated within a column. By default, it doesn't mark the first duplicate as a duplicate, but I usually find it helpful to see all the duplicates, so I pass the parameter `keep=False`, which means, "Count the first instance of a duplicated datum as a duplicate."
```
zhvi_df.loc[zhvi_df.RegionName.duplicated(keep=False)]
```
No zip codes in our original dataset are duplicated, so that's good! Note that I didn't call that method on our "melted" dataset, since converting the original table to a "long" version created many duplicates.
In our "melted" dataset, the combination of zip code and month should be unique. We can check for this using the `subset` parameter.
```
z_df.loc[z_df.duplicated(subset=['RegionName', 'month'], keep=False)]
```
### Getting ACS data
To make things more interesting, we're going to combine our Zillow data with a dataset from the US Census. The American Community Survey provides five-year estimates of many important economic indicators, including median household income by zip code.
We can use the Census API to fetch the tables we need. The Python [census](https://pypi.org/project/census/) package provides a convenient wrapper around the APIs.
If you submitted the form at the start of our workshop, you should have received an API key via the email address you provided. If you didn't, I'll provide a link to the datasets we're going to use.
#### Retrieving Census data
First we need to initialize the module with our API key.
```
apikey = ''
census = Census(apikey)
```
To access Census tables by API, we need to know the specific variable names. You can find these in the [ACS documentation](https://api.census.gov/data/2019/acs/acs5/variables.html). Using the `census` module, we can specify the ACS 5-year tables and zip code as the geographical level. In addition, we can specify a year as a method parameter if we want data other than that from the most recent survey.
In order to have some timeseries data, we'll retrieve the data for 2014 and 2019. (It's recommended to use [non-overlapping datasets](https://www.census.gov/data/developers/data-sets/acs-5year.html) for the ACS 5-year surveys because of how the estimates are calculated.)
```
median_income = "B19013_001E"
census = Census(apikey)
acs2019 = census.acs5.zipcode(median_income, '*')
acs2014 = census.acs5.zipcode(median_income, '*', year=2014)
```
#### Creating a DataFrame
The Census API returns the table as a list of dictionaries. You can see that the value for our variable -- median household income -- is provided for each row, as well as a code for the state and the zip code (both strings).
Our first step is to convert these two lists to DataFrames so that we can efficiently merge them with our Zillow data.
```
acs2019
```
pandas has a handy method called `DataFrame.from_records`, which creates a DataFrame from exactly this structure. (The elements of the list are the rows, the keys of the dictionaries are the columns.)
We can also rename our columns for clarity and concision.
```
acs_df = pd.DataFrame.from_records(acs2019)
acs_df = acs_df.rename(columns={'B19013_001E': '2019_median_hhi',
'zip code tabulation area': 'zip_code'})
len(acs_df)
```
#### Concatenating DataFrames
But we have two separate datasets, one from 2019 and one from 2014. Ideally, we want a single table containing all of our ACS data. We'll accomplish that by using pandas' `concat` method, which takes a list of DataFrames and stacks them one on top of the other.
We'll add a column to record the year. And we'll put this code into a Python function, which is good practice for encapsulating our code for reproducibility.
```
def create_acs_df(datasets, years):
'''
Accepts a list of ACS result sets (lists of dicts) to combine and a list of years, which will be added as a column to the resulting DataFrame
'''
# Step 1: Create an empty DataFrame -- for the first dataset, we need something to concat it with
df_all = pd.DataFrame()
# Step 2: Create a for loop: we can use the zip method to loop over the datasets and years at the same time
for dataset, year in zip(datasets, years):
# Create a DataFrame from the current dataset
df = pd.DataFrame.from_records(dataset)
# Create a year column and populate it with the corresponding year
df['year'] = year
# Rename the columns
df = df.rename(columns={'B19013_001E': 'median_hhi',
'zip code tabulation area': 'zip_code'})
# Concat with the previous DataFrame
df_all = pd.concat([df_all, df])
# Don't forget to return something!
return df_all
acs_df = create_acs_df([acs2019, acs2014], [2019, 2014])
acs_df
```
#### Dealing with "bad" data
It looks like we have some outliers here, which may be an artifact of how the data has been reported or coded. We can use the `describe` method to try to spot them. (Ignore the `year` column; we're only interested in outliers in the `median_hhi` column.)
```
acs_df.describe()
```
It doesn't really make sense to have a negative median income. Indeed, by looking at the unique values in the column that fall **below** zero, we can see that there's only one such value. It may mean any number of things, but for the purposes of this example, we can simply discard those rows.
```
acs_df.loc[acs_df.median_hhi < 0].median_hhi.unique()
```
We're using `DataFrame.loc` to limit to the rows with a non-negative value for `median_hhi`. We'll make a copy of the filtered DataFrame, which helps avoid issues as we manipulate this data later.
```
acs_df = acs_df.loc[acs_df.median_hhi >= 0].copy()
acs_df.describe()
```
### Building our joint dataset
In order to compare Zillow home values and median household income by geographical area, it will be useful to have a single table that contains both variables. We'll create this table in a few steps.
#### Creating aligned data
Most important is to make sure that our two variables are aligned, meaning that they represent observations at the same scale.
How well are our datasets aligned?
| Dataset | Measure | Geographic Dimension | Temporal Dimension |
| --- | --- | --- | --- |
| Zillow | Estimated median home value (dollars) | Zipcode | Month |
| ACS | Median household income (dollars) | Zipcode | Year |
Zillow and ACS are measuring different things, but they're both using the median as their statistic, and they're both measuring value in dollars. Geographically, both datasets are aligned at the level of zipcode.
However, the Zillow data are presented on a monthly basis, while the ACS data are available only at the annual level. Moreover, because of the 5-year overlap, we have only two "years" of ACS data to work with. We'll need to adjust our Zillow dataset to bring it into alignment with our ACS data.
(Technically, the ACS data represent five-year aggregates, which are not the same as annual measures. In what follows we are going to take a statistically naive approach to comparing and summarizing these data. This approach is not intended to be empirically rigorous; rather, it is meant to illustrate operations with pandas. For more on the appropriate methodology for handling ACS median values, [this guide](http://www.dof.ca.gov/Forecasting/Demographics/Census_Data_Center_Network/documents/How_to_Recalculate_a_Median.pdf) is a good starting point.)
#### Filtering a dataset and checking for missing data
First we limit the Zillow data to the years for which we have ACS data: 2014 and 2019. We can use the `dt` datetime attribute on the `month` column to select particular years with `DataFrame.loc`.
```
z_df = z_df.loc[(z_df.month.dt.year == 2019) | (z_df.month.dt.year == 2014)].copy()
```
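For reference, an equivalent and slightly more compact filter uses `isin` on the year, which avoids chaining `|` conditions if we ever add more years:
```
# Equivalent to the filter above: keep only rows whose year is 2014 or 2019
z_df = z_df.loc[z_df.month.dt.year.isin([2014, 2019])].copy()
```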
To see if there are zip codes represented in one dataset but not in the other, we can use the `isin` method to compare the values in one DataFrame with the values in another. This code counts how many unique zip codes in our Zillow dataset are not present in the ACS dataset.
```
len(z_df.loc[~z_df.RegionName.isin(acs_df.zip_code)].RegionName.unique())
```
We can do the same to count how many zip codes in the ACS dataset are missing from the Zillow data.
```
len(acs_df.loc[~acs_df.zip_code.isin(z_df.RegionName)])
```
In this case, we'll simply omit these non-overlapping data points from our merged dataset. But in other cases, it might be important to account for them in some way.
#### Aggregating by time scale
We'll aggregate the Zillow data by year in order to compare it with the ACS data at the same time scale.
We'll use the `DataFrame.groupby` method to group our dataset at the desired level. We're grouping by year, so we can use the `dt.year` attribute on the `month` column as the group key. We can group by multiple columns, so we might want to include other columns, too (except for the `value` column, which contains the data that we're trying to summarize).
We can provide them as a list to the `groupby` method, which uses the unique combinations of the values in these columns as the keys for grouping.
```
z_grp = z_df.groupby([z_df.StateName, z_df.City, z_df.Metro, z_df.RegionName, z_df.month.dt.year])
```
Now let's take the `mean` of the `value` column from our grouped DataFrame. Such operations return a Series by default (in this case, with a so-called hierarchical index). To turn that back into a DataFrame, we can use the `reset_index` method.
```
z_means = z_grp.value.mean()
z_means = z_means.reset_index()
z_means
```
When grouping by multiple columns, it's important to be careful when including columns with null values. Recall from our analysis above that sometimes the `Metro` column is blank in this dataset.
What happens to those rows in our final group?
```
z_means.loc[z_means.Metro.isnull()]
```
By grouping by a column with nulls, we've actually lost a fair amount of data!
```
len(z_df.loc[~z_df.RegionName.isin(z_means.RegionName)].RegionName.unique())
```
To fix this problem, we could omit the `Metro` column from our `groupby` statement. In more recent versions of pandas, we can also add a `dropna=False` parameter to our `groupby` statement, which will keep the null values in the group keys.
```
z_grp = z_df.groupby([z_df.StateName, z_df.City, z_df.Metro, z_df.RegionName, z_df.month.dt.year],
dropna=False)
z_means = z_grp.value.mean()
z_means = z_means.reset_index()
z_means.loc[z_means.Metro.isnull()]
z_means
```
#### Merging data
Now we're ready to merge our Zillow data with our ACS data. The `merge` method performs the equivalent of a SQL join on two DataFrames that share at least one common key. The keys are the columns with values that are the same in both datasets. The following chart shows the keys between our two datasets.
| Measure or Dimension | `z_means` | `acs` |
| --- | --- | --- |
| State | `StateName` | |
| City | `City` | |
| Metro | `Metro` | |
| Zip code | `RegionName` | `zip_code` |
| Year | `month` | `year` |
| State code | | `state` |
| Median income | | `median_hhi` |
| Home value | `value` | |
The keys don't have to have the same column **names**, but they must share at least some of the same **values** in those columns. In our case, the shared values are the zip codes and the years. (Note that both our Zillow and ACS datasets have columns for U.S. state, but the values in those columns do not overlap: the Zillow dataset uses the name of the state, while the ACS data uses a numerical identifier.)
Before merging our datasets, we'll rename the Zillow columns `month` and `value` to more accurately represent their contents. This isn't necessary for merging, but it will make the merged table more legible for us.
```
z_means = z_means.rename(columns={'month': 'year',
'value': 'zhvi'})
```
Now we do the merge, which creates a new DataFrame. The arguments to the `merge` method are as follows:
- `acs_df`: the second DataFrame we want to merge with the first.
- `left_on`: this parameter takes a list of columns in the first DataFrame to use as keys.
- `right_on`: this parameter takes a list of columns in the second DataFrame to use as keys.
You can also specify a `how` parameter, which indicates the kind of join. The default is an **inner** join, meaning that the merged DataFrame will only contain rows where the keys are present in both of the source DataFrames. In this case, an inner join will drop those rows for zip codes that are missing from either the Zillow or the ACS data.
```
z_merged = z_means.merge(acs_df, left_on=['RegionName', 'year'],
right_on=['zip_code', 'year'])
```
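If we wanted to keep and inspect the non-matching rows instead, we could ask for an outer join and pass `indicator=True`, which adds a `_merge` column recording whether each row came from the left frame, the right frame, or both. This is shown only for illustration; we'll stick with the inner join above.
```
# For illustration: an outer join keeps rows that match in only one DataFrame
z_outer = z_means.merge(acs_df, left_on=['RegionName', 'year'],
                        right_on=['zip_code', 'year'],
                        how='outer', indicator=True)
# _merge is 'left_only', 'right_only', or 'both'
z_outer['_merge'].value_counts()
```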
We don't need the `state` column from our ACS dataset. Nor do we need two columns of zip codes, so we can drop these from our merged dataset. The `axis` parameter is important in the `drop` method: `axis=1` means "drop columns" (as opposed to rows).
```
z_merged = z_merged.drop(['state', 'zip_code'], axis=1)
```
#### Calculating change over time
As an illustration of further uses for `groupby`, we'll calculate the percentage change between 2014 and 2019 for both the ZHVI and median household income for each zip code.
First, we'll sort the merged dataset by year.
```
z_merged = z_merged.sort_values(by='year')
```
Now we can create new columns to hold the percentage change for each of our measures (home value and income). To calculate the percentage change, we will use the built-in `pct_change` method, applying it to the result of a `groupby` operation (since we still want to observe the changes at the zip-code level).
```
z_merged.columns
columns = ['StateName', 'City', 'Metro', 'RegionName']
z_merged['zhvi_pc'] = z_merged.groupby(columns, dropna=False).zhvi.pct_change()
z_merged['hhi_pc'] = z_merged.groupby(columns, dropna=False).median_hhi.pct_change()
```
Note that the `pc` columns have null values for all the rows where the `year` is 2014. That is expected: the first value in each sequence has no preceding value to compare against, so its percentage change is undefined. For each subsequent value, the percentage change is calculated with respect to the preceding value.
```
z_merged
```
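As a quick illustration of how `pct_change` behaves, here it is on a toy Series: the first element has nothing to compare against, so it comes back as null.
```
# Toy example: pct_change compares each value with the one before it
pd.Series([100, 110, 99]).pct_change()
# -> NaN, 0.10, -0.10
```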
If we're just interested in the percentage change, we could drop the 2014 rows.
```
z_merged = z_merged.loc[z_merged.year == 2019].copy()
z_merged.describe()
```
And here's a demonstration of how we could use a custom function to calculate percentage change. There's no need to do so -- the built-in method will be more efficient. But sometimes you want to compute an aggregation that's not available as a built-in method.
For relatively simple calculations, we can use a `lambda` function, which is basically a one-line Python function. We define it with `lambda` instead of `def`.
The `x` in the lambda expression is a function parameter. When used with `apply` on the `zhvi` column, `x` will represent a pandas Series containing the ZHVI values for each group created by our `groupby` expression.
Since `x` is a Series, it has all the usual Series methods and properties, including `iloc`, which lets us take the first and second values for the purposes of calculating the percentage change.
```
z_means.groupby(['RegionName', 'StateName', 'City']).zhvi.apply(lambda x: (x.iloc[1] - x.iloc[0]) / x.iloc[0])
```
We can now use our merged, aggregated dataset to investigate certain questions.
For example, where have home values risen *faster* than household income?
```
z_merged.loc[z_merged.zhvi_pc > z_merged.hhi_pc]
```
Where have they kept pace or lagged behind income?
```
z_merged.loc[z_merged.zhvi_pc <= z_merged.hhi_pc]
```
### Questions for practice
1. Which states have the greatest range in home values? Hint: select year, group by state, calculate max - min
2. Can you find any cities where the *average* percent change in median income is higher than the *average* percent change in home values?
3. Are the most expensive cities (to buy in) the wealthiest (by median income)?
-------------
1. Answer
a. We need to pick a point in time: let's say 2019.
```
z19 = z_means.loc[z_means.year == 2019]
```
1. b. Then we can group by state and use `apply` with a lambda function to find the difference between the largest and smallest values in each group.
```
z_19_range = z19.groupby('StateName').zhvi.apply(lambda x: x.max() - x.min())
```
1. c. Finally, we can sort the values in descending order to find the biggest differences.
```
z_19_range.sort_values(ascending=False)
```
2. Answer
a. We can group our merged dataset by city and state.
```
city_pc = z_merged.groupby(['City', 'StateName'])
```
2. b. To compute the mean of two columns at the same time, we can use the `agg` method available on a pandas `GroupBy` object. This method takes a dictionary mapping column names to the name of a built-in pandas method or a lambda function.
```
city_pc = city_pc.agg({'zhvi_pc': 'mean',
'hhi_pc': 'mean'})
```
2. c. Now we can use `.loc` to find those rows where our mean ZHVI percentage change is smaller.
```
city_pc.loc[city_pc.zhvi_pc < city_pc.hhi_pc]
```
3. Answer
a. First we group by state and city (to account for cities that may have the same names in different states) and take the mean of the ZHVI. We sort so that the highest numbers are at the top.
```
zhvi_cities = z_merged.groupby(['StateName', 'City']).zhvi.mean().sort_values(ascending=False)
```
3. b. We do the same for median income.
```
hhi_cities = z_merged.groupby(['StateName', 'City']).median_hhi.mean().sort_values(ascending=False)
```
3. c. For each of these, we take the top 100 rows (after resetting the index).
```
top_zhvi_cities = zhvi_cities.reset_index().head(100)
top_hhi_cities = hhi_cities.reset_index().head(100)
```
3. d. Now we can use `merge` to find those cities in the top 100 for ZHVI that are also in the top 100 for median income. Since the `StateName` and `City` columns are common to both of the DataFrames we're merging, we don't have to specify them as the keys for merging -- pandas will use them by default.
```
top_zhvi_cities.merge(top_hhi_cities)
```
<br>
# Introduction
```
import os
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
from paths import *
```
<br>
# Plotly
```
df = pd.read_csv(
#os.path.join('data', 'tabs', 'tab_municipio_allinfos.csv'),
'https://raw.githubusercontent.com/michelmetran/pl251/main/data/tabs/tab_municipio_allinfos.csv',
)
df
# Classes
#class_1 = 'nome_ugrhi'
class_1 = 'nome_rm'
class_2 = 'unidade'
# Lists for iteration
list_class_1 = list(set(df[class_1]))
list_class_1.sort()
list_class_2 = list(set(df[class_2]))
list_class_2.sort()
list_class = []
for i in list_class_1:
#print('Para a {} "{}" temos:'.format(class_1, i))
df_temp = df[df[class_1] == i].copy()
list_subclass = []
for j in list_class_2:
#print('Na {} "{}"'.format(class_2, j))
df_temp2 = df_temp[df_temp[class_2] == j].copy()
list_mun = list(df_temp2['municipio_nome'])
if len(list_mun) == 0:
list_text = ''
list_subclass.append(list_text)
elif len(list_mun) != 0:
list_mun.sort()
list_text = '<br>'.join(list_mun)
#print('> {}'.format(list_text))
list_subclass.append(list_text)
list_class.append(list_subclass)
# Convert to Array
df_array = df.groupby(by=[class_1])['unidade'].value_counts().sort_index()
df_array = df_array.unstack()
#print(df_array)
#display(df_array)
# Principal Data
data_1 = df_array.replace(np.nan, 0, regex=True).to_numpy()
data_1 = data_1.astype(np.int64)
data_2 = np.array([int(i) for i in list(df_array.sum(axis=1))])
data_2 = np.reshape(data_2, (1, len(df_array))).T
data_3 = np.array([int(i) for i in list(df_array.sum(axis=0))])
data_3 = np.reshape(data_3, (1, len(df_array.columns)))
data_4 = np.array([[data_3.sum()]])
# Labels
x_label = list(df_array.columns)
y_label = list(df_array.index)
# Invert Matrices
data_1 = data_1[::-1]
data_2 = data_2[::-1]
data_3 = data_3[::-1]
list_class = list_class[::-1]
#x_label = x_label[::-1]
y_label = y_label[::-1]
# Results
print(data_1)
print(data_2)
print(data_3)
print(data_4)
print(y_label)
y_label = [(x.replace('Região Metropolitana do ', 'RM<br>').
replace('Região Metropolitana de ', 'RM<br>').
replace('Região Metropolitana da ', 'RM<br>').
replace('Aglomeração Urbana de ', 'AU<br>').
replace('-AU- Piracicaba', '').
replace('Vale do Paraíba e Litoral Norte', 'V. Paraíba e L. Norte')
) for x in y_label]
y_label
# Create Subplots
fig = make_subplots(
rows=2,
cols=2,
row_heights=[0.85, 0.1],
column_widths=[0.85, 0.1],
vertical_spacing=0.05,
horizontal_spacing=0.05,
print_grid=True,
shared_xaxes=True,
shared_yaxes=True,
)
colorscale = 'YlGnBu'
# Make Annotated Heatmap
fig1 = ff.create_annotated_heatmap(
data_1,
text=list_class,
x=[i.replace(' - ', '<br>') for i in x_label],
y=y_label,
colorscale=colorscale,
#font_colors=['black'],
hoverinfo='text',
)
fig2 = ff.create_annotated_heatmap(
data_2,
text=[['Nº Mun.<br>{}'.format(i.replace('<br>', ' ')) for i in y_label]],
colorscale=colorscale,
#font_colors=['black'],
hoverinfo='text',
)
fig3 = ff.create_annotated_heatmap(
data_3,
text=[['Nº Mun.<br>{}'.format(i) for i in x_label]],
colorscale=colorscale,
#font_colors=['black'],
hoverinfo='text',
)
fig4 = ff.create_annotated_heatmap(
data_4,
text=[['Nº Mun. Estado']],
colorscale=colorscale,
#font_colors=['black'],
hoverinfo='text',
)
# Adds
fig.add_trace(fig1.data[0], 1, 1)
fig.add_trace(fig2.data[0], 1, 2)
fig.add_trace(fig3.data[0], 2, 1)
fig.add_trace(fig4.data[0], 2, 2)
# Label
annot1 = list(fig1.layout.annotations)
annot2 = list(fig2.layout.annotations)
annot3 = list(fig3.layout.annotations)
annot4 = list(fig4.layout.annotations)
for k in range(len(annot2)):
annot2[k]['xref'] = 'x2'
annot2[k]['yref'] = 'y2'
for k in range(len(annot3)):
annot3[k]['xref'] = 'x3'
annot3[k]['yref'] = 'y3'
for k in range(len(annot4)):
annot4[k]['xref'] = 'x4'
annot4[k]['yref'] = 'y4'
fig.update_layout(annotations=annot1+annot2+annot3+annot4)
# Updates
fig.update_layout(
#title_text='URAEs <i>vs.</i> RMs/AUs',
width=700,
height=700,
showlegend=False,
font_size=11,
hovermode='closest',
#hovermode='x',
xaxis1_showticklabels=True,
yaxis1_showticklabels=True,
xaxis2_showticklabels=False,
yaxis2_showticklabels=False,
xaxis3_showticklabels=False,
yaxis3_showticklabels=False,
xaxis4_showticklabels=False,
yaxis4_showticklabels=False,
xaxis=go.layout.XAxis(
tickangle=-40,
side='top',
),
dragmode=False,
)
fig.update_traces(
#colorbar_tickangle=60,
#selector=dict(type='heatmap')
)
fig.update_layout(
xaxis_domain=[0.0, 0.85],
yaxis_domain=[0.15, 1.0],
)
# Results
config = {
'displayModeBar': False,
#'scrollZoom': False,
'displaylogo': False,
#'staticPlot': True,
}
fig.show(config=config)
fig.write_html(os.path.join(imgs_path, 'matrix_{}.html'.format(class_1.replace('nome_', ''))))
```
```
%cd ..
import json
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from sts_wrldom.corpusReader import read_data
from sts_wrldom.enrichPipe import preprocess_raw
from sts_wrldom.depTFIDFModel import depFit_Predict
from sts_wrldom.utils import log_frame, accuracy, get_scores, rmse, write_results
dfs = read_data(["dev", "train"])
dev = dfs["dev"]
train = dfs["train"]
dev_train = dev.append(train)
%%time
dev_docs = preprocess_raw(dfs["dev"])
train_docs = preprocess_raw(dfs["train"])
dev_train_docs = dev_docs + train_docs
dev_predics = depFit_Predict(dev_docs)
train_predics = depFit_Predict(train_docs)
dev_train_predics = depFit_Predict(dev_train_docs)
dev["prediction"] = [int(elem) for elem in np.round(dev_predics)]
train["prediction"] = [int(elem) for elem in np.round(train_predics)]
dev_train["prediction"] = [int(elem) for elem in np.round(dev_train_predics)]
for df, name in zip([dev, train], ["dev", "train"]):
log_frame(df, name=name, tag="depTFIDF_predics")
res = df[["id", "prediction"]]
write_results(res, name, "depPredic")
for df, name in zip([dev, train, dev_train], ["Dev", "Train", "Dev-Train"]):
acc = accuracy(df["prediction"], df["gold"])
_rmse = rmse(df["prediction"], df["gold"])
pear_corr = pearsonr(list(df["prediction"]), list(df["gold"]))
cols = ["RMSE", "Accuracy", "Pearson's R", "Pearson's R p-val"]
vals = [_rmse, acc, pear_corr[0], pear_corr[1]]
stats = pd.DataFrame(list(df["prediction"]), columns=["Predic_Label"]).describe()
extra = pd.DataFrame(vals, index=cols, columns=["Predic_Label"])
print(f"\n{name} Gold stats: ")
print(pd.DataFrame(list(df["gold"]), columns=["Gold_Label"]).describe().T)
print(f"\n{name} depTFIDF Model Prediction stats: ")
print(stats.append(extra).T)
print("\n------")
for df, name in zip([dev, train, dev_train], ["Dev", "Train", "Dev-Train"]):
print(f"\n{name} Prediction Metrics:")
metrics = get_scores(list(df["prediction"]), list(df["gold"]))
print(json.dumps(metrics, indent=2))
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
labels = [1, 2, 3, 4, 5]
for df, name in zip([dev, train, dev_train], ["Dev-Set", "Train-Set", "Dev_Train-Set"]):
cm = confusion_matrix(list(df["gold"]), list(df["prediction"]))
df_cm = pd.DataFrame(cm, index=labels, columns=labels)
f,(ax1,ax2) = plt.subplots(1,2,sharey=False, figsize=(10,3))
g1 = sns.heatmap(df_cm,annot=True, fmt='d', ax=ax1)
g1.set_ylabel('True Label')
g1.set_xlabel('Predicted Label')
g1.set_title(f'{name} Confusion Matrix')
cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
df_cm_norm = pd.DataFrame(cm_norm, index=labels, columns=labels)
g2 = sns.heatmap(df_cm_norm,annot=True, vmin=0, vmax=1, ax=ax2)
g2.set_ylabel('True Label')
g2.set_xlabel('Predicted Label')
g2.set_title(f'{name} Normed Confusion Matrix')
```
# 1. Super-cell writer
A code that generates 2D Serpent input files to perform FM precomputation.
## 1.1. Content
* objectZoo: contains the super-cell building blocks.
* Pin(name, dimensions, materials)
* Assm(name, pin_map, pitch, type_lattice)
* Cell(name, assm_map, pitch, type_lattice)
* Geometry(name, pin_set, assm_set, bc)
* Material(name, density, temperature, composition, moder)
* FissionMatrix(type_fm, limits)
* inputWriter: a collection of writers to create the Serpent input file. Inputs:
* filePath, string
* Geometry, object
* Materials, objects List
* Settings, dictionary
* FissionMatrix, object.
## 1.2. Example
```
# Import classes
from objectZoo import Pin, Assm, Cell, Geometry, Material, FissionMatrix
from inputWriter import SerpentWriter as sW
# FilePaths
filePath = 'miniCore.i'
libX = '/nv/hp22/dkotlyar6/data/Codes/DATA/sss_endfb7u.xsdata'
# Pin
radiiP = [0.410, 0.475, 1.26]
nPins = 17
# Assembly
pitchA = nPins*radiiP[-1]
pin_map = [['ff'] * nPins] * nPins
# SuperCell
assm_map = [['a1','a2']]
bc = ['vacuum', 'reflective'] # x and y direction
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % MATERIALS %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
materials = []
# FUEL
composition = [['92235.09c', 0.02644492], ['92238.09c', 0.85505247], ['8016.09c', 0.11850261]]
fuel = Material('fuel', -10.3067, 900, composition, 'mass')
materials.append(fuel)
# WATER
composition = [['1001.06c', 0.6666667], ['8016.06c', 0.3333333]]
water = Material('water', -0.700452, 600, composition, 'molar', 'lwj3.11t ')
materials.append(water)
# Clad
composition = [['40000.06c', 1.0]]
clad = Material('clad', -6.5, 600, composition)
materials.append(clad)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % GEOMETRY %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
pin1 = Pin('ff', radiiP, [fuel.name, water.name, clad.name])
a1 = Assm('a1', pin_map, radiiP[-1])
a2 = Assm('a2', pin_map, radiiP[-1])
superCell = Cell('Super', assm_map, pitchA, bc)
geometry = Geometry('miniCore', [pin1], [a1, a2], superCell)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % SETTINGS %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
settings = {'pop': 100000,
'active cycles': 100,
'inactive cycles': 50,
'k guess': 1.0,
'ures': '92238.09c',
'lib': libX
}
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % FISSION MATRIX %
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fm = FissionMatrix('cartesian',
[-pitchA, pitchA, 2*nPins, pitchA/2, pitchA/2, nPins,
-1e37, 1e37, 1])
# EXECUTE
serpentInp = sW(filePath, geometry, materials, settings, fm)
serpentInp.write()
```
## 1.3. Application to FM pregeneration stage
Current and future work
# Heart-disease-Prediction-using-Machine-Learning-Algorithms
```
#Here I have imported all the essential libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
%matplotlib inline
sns.set_style("whitegrid")
plt.style.use("ggplot")
#here I input the dataset folder
df = pd.read_csv("C:/Users/Abhishek Nagrecha/Desktop/input/heart.csv")
df.head()
#this shows the structure and shape of the used dataset
df.info()
print("The shape of the data is:", df.shape)
#To display how many patients have got a heart disease
df.target.value_counts()
# df.sex.value_counts()
```
# Dataset Exploration for better understanding
```
df.target.value_counts().plot(kind="bar", color=["red", "blue"])
# Checking for the missing values in the dataset
df.isna().sum()
categorical_val = []
continuous_val = []
for column in df.columns:
print('-------------------------')
print(f"{column} : {df[column].unique()}")
if len(df[column].unique()) <= 10:
categorical_val.append(column)
else:
continuous_val.append(column)
print(categorical_val)
print(continuous_val)
#Here I have shown visually the categorical features in correlation with having a heart disease
plt.figure(figsize=(20, 20))
for i, column in enumerate(categorical_val, 1):
plt.subplot(3, 3, i)
df[df["target"] == 0][column].hist(bins=35, color='blue', label='Heart Disease = NO', alpha=0.6)
df[df["target"] == 1][column].hist(bins=35, color='red', label='Heart Disease = YES', alpha=0.6)
plt.legend()
plt.xlabel(column)
#Here I have shown visually the continuous features in correlation with having a heart disease
plt.figure(figsize=(20, 20))
for i, column in enumerate(continuous_val, 1):
plt.subplot(3, 3, i)
df[df["target"] == 0][column].hist(bins=35, color='blue', label='Heart Disease = NO', alpha=0.6)
df[df["target"] == 1][column].hist(bins=35, color='red', label='Heart Disease = YES', alpha=0.6)
plt.legend()
plt.xlabel(column)
```
# Data Pre-processing
```
# After exploring the dataset, I observed that I need to convert some
# categorical variables into dummy variables and scale all the values
categorical_val.remove('target')
dataset = pd.get_dummies(df, columns = categorical_val)
dataset.head()
print(df.columns)
print(dataset.columns)
from sklearn.preprocessing import MinMaxScaler
m_sc = MinMaxScaler()
col_to_scale = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
dataset[col_to_scale] = m_sc.fit_transform(dataset[col_to_scale])
dataset.head()
```
# Applying machine learning algorithms
```
#here I have specified all the scoring metrics which will be used to evaluate the model's performance
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
print("Train Result:\n================================================")
print(f"Accuracy Score: {accuracy_score(y_train, pred) :.2f}")
print("_______________________________________________")
print("Classification Report:", end='')
print(f"\tPrecision Score: {precision_score(y_train, pred) :.2f}")
print(f"\t\t\tRecall Score: {recall_score(y_train, pred) :.2f}")
print(f"\t\t\tF1 score: {f1_score(y_train, pred) :.2f}")
print("_______________________________________________")
print(f"Confusion Matrix: \n {confusion_matrix(y_train, pred)}\n")
elif train==False:
pred = clf.predict(X_test)
print("Test Result:\n================================================")
print(f"Accuracy Score: {accuracy_score(y_test, pred) :.2f}")
print("_______________________________________________")
print("Classification Report:", end='')
print(f"\tPrecision Score: {precision_score(y_test, pred) :.2f}")
print(f"\t\t\tRecall Score: {recall_score(y_test, pred) :.2f}")
print(f"\t\t\tF1 score: {f1_score(y_test, pred) :.2f}")
print("_______________________________________________")
print(f"Confusion Matrix: \n {confusion_matrix(y_test, pred)}\n")
#here I divided the data in the ratio of 70:30
from sklearn.model_selection import train_test_split
X = dataset.drop('target', axis=1)
y = dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
```
# Logistic Regression
```
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver='sag')
log_reg.fit(X_train, y_train)
print_score(log_reg, X_train, y_train, X_test, y_test, train=True)
print_score(log_reg, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, log_reg.predict(X_test))
train_score = accuracy_score(y_train, log_reg.predict(X_train))
results_df = pd.DataFrame(data=[["Logistic Regression", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
```
# K-nearest neighbors
```
from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier()
knn_classifier.fit(X_train, y_train)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=True)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, knn_classifier.predict(X_test))
train_score = accuracy_score(y_train, knn_classifier.predict(X_train))
results_df = pd.DataFrame(data=[["K Nearest Neighbor", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
```
# Support Vector Machine
```
from sklearn.svm import SVC
svm_model = SVC(kernel='poly', gamma=0.1, C=1.0)
svm_model.fit(X_train, y_train)
print_score(svm_model, X_train, y_train, X_test, y_test, train=True)
print_score(svm_model, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, svm_model.predict(X_test))
train_score = accuracy_score(y_train, svm_model.predict(X_train))
results_df = pd.DataFrame(data=[["Support Vector Machine", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
```
# Hyperparameter Tuning to get better performance
```
#tuning the parameters for Logistic regression here
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold
from scipy.stats import randint, uniform
hyperparameters = {
    # C is continuous and must be positive, so sample it from a uniform
    # distribution; randint would only yield integers (including an invalid 0)
    'C': uniform(0.0001, 1000),
    'penalty': ['l1', 'l2'],
    'max_iter': randint(100, 500),
    'class_weight': [{1: 0.5, 0: 0.5}, {1: 0.4, 0: 0.6}, {1: 0.6, 0: 0.4}, {1: 0.7, 0: 0.3}, {1: 0.8, 0: 0.2}]
}
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
log_reg = LogisticRegression()
random_search_cv = RandomizedSearchCV(log_reg, hyperparameters, scoring="accuracy", n_jobs=-1, verbose=1, cv=5, iid=True)
random_search_cv.fit(X_train, y_train)
random_search_cv.best_estimator_
log_reg = LogisticRegression(C=741,
solver='warn',class_weight={0: 0.5, 1: 0.5},fit_intercept=True, intercept_scaling=1, l1_ratio=None,
max_iter=197, multi_class='warn', n_jobs=None, penalty='l1',
random_state=None, tol=0.0001, verbose=0,
warm_start=False)
log_reg.fit(X_train, y_train)
print_score(log_reg, X_train, y_train, X_test, y_test, train=True)
print_score(log_reg, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, log_reg.predict(X_test))
train_score = accuracy_score(y_train, log_reg.predict(X_train))
tuning_results_df = pd.DataFrame(data=[["Logistic Regression", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
tuning_results_df
#tuning the hyperparameters for K nearest neighbor here
hyperparameters = {'n_neighbors': randint(1, 10),
'leaf_size': randint(1, 8),
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'cityblock']
}
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
knn = KNeighborsClassifier()
random_search_cv = RandomizedSearchCV(knn, hyperparameters, scoring="accuracy", n_jobs=-1, verbose=1, cv=5, iid=True)
random_search_cv.fit(X_train, y_train)
random_search_cv.best_estimator_
knn_classifier = KNeighborsClassifier(n_neighbors=7,algorithm='auto', leaf_size=1, metric='euclidean',
metric_params=None, p=2,
weights='distance')
knn_classifier.fit(X_train, y_train)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=True)
print_score(knn_classifier, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, knn_classifier.predict(X_test))
train_score = accuracy_score(y_train, knn_classifier.predict(X_train))
results_df = pd.DataFrame(data=[[" K-nearest Neighbor", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
svm_model = SVC(kernel='rbf', gamma=0.1, C=1.0)
hyperparameters = {
    "C": [0.001, 0.01, 0.1, 0.3, 0.5, 0.7, 1, 3, 5, 7, 9],
    # gamma is continuous and must be > 0, so sample it from a uniform
    # distribution; randint(0.01, 1) would only ever return 0
    "gamma": uniform(0.01, 0.99),
    'kernel': ['linear', 'rbf', 'poly', 'sigmoid'],
    'degree': randint(1, 10)
}
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
svm_random = RandomizedSearchCV(svm_model, hyperparameters, n_jobs=-1, cv=5, verbose=1, scoring="accuracy")
svm_random.fit(X_train, y_train)
svm_model = SVC(C=5, gamma=0.1, kernel='rbf',cache_size=200, class_weight=None,
coef0=0.0, decision_function_shape='ovr',
degree=3,)
svm_model.fit(X_train, y_train)
print_score(svm_model, X_train, y_train, X_test, y_test, train=True)
print_score(svm_model, X_train, y_train, X_test, y_test, train=False)
test_score = accuracy_score(y_test, svm_model.predict(X_test))
train_score = accuracy_score(y_train, svm_model.predict(X_train))
results_df = pd.DataFrame(data=[["Support Vector Machine", train_score, test_score]],
columns=['Machine Learning Model', 'Train Accuracy', 'Test Accuracy'])
results_df
```
```
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import pickle
import gzip
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import linear_model
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from helpers import plotcfg
from helpers import show_results
from helpers import preprocessing
from helpers import classic_algorithms
from helpers import learning_curve
```
## Scenario
Quick recap: in this scenario, 1000 spectra were created, among which 50% had a main peak with SNR varying between 1 and 3 dB. In 5% of the spectra, a spurious peak of intensity ranging from 1 to 3 dB was placed at random positions in the spectra.
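For intuition, here is a minimal sketch of how one such artificial spectrum could be generated with NumPy. The channel count, background level, and peak position below are my own assumptions, not the parameters actually used to build `artificial.pickle`:
```
import numpy as np

rng = np.random.default_rng(0)
n_channels, peak_channel, fwhm = 128, 50, 3.0           # assumed layout
background = rng.poisson(lam=100, size=n_channels).astype(float)
noise_std = background.std()
snr_db = rng.uniform(1, 3)                              # main peak SNR in [1, 3] dB
amplitude = noise_std * 10 ** (snr_db / 20)
sigma = fwhm / 2.355                                    # FWHM to Gaussian sigma
channels = np.arange(n_channels)
spectrum = background + amplitude * np.exp(-0.5 * ((channels - peak_channel) / sigma) ** 2)
```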
```
X, y, d = pickle.load(gzip.open('../data/artificial.pickle', 'rb'), encoding='latin1')
```
## Model definition
The idea behind each step in the preprocessing phase is explained in the previous notebooks. Here, I just used a pipeline to stitch everything together.
```
model = make_pipeline(preprocessing.Cwt(), # Wavelet transform
StandardScaler(),
preprocessing.ForestSelect(k=10, trees=100), # Feature selection via Random Forest
linear_model.LogisticRegression())
```
The model was tested against the conventional methods for evaluating peak presence in gamma-ray spectra (Unidentified Second Difference and Library Correlation). I implemented these algorithms inside a sklearn Estimator class, so that they can be evaluated in the same pipeline using the same class methods as the original sklearn estimators.
Cross-validation used repeated k-fold with 3 repetitions and k=10.
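To illustrate the wrapping idea, here is a minimal sketch of a scikit-learn-compatible detector (the real `SecondDifference` and `LibCorNID` implementations live in `helpers.classic_algorithms`; the thresholding rule below is only a placeholder):
```
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin

class ThresholdPeakDetector(BaseEstimator, ClassifierMixin):
    """Toy detector: flags a peak when the count in `channel` exceeds the
    spectrum mean by `k` standard deviations."""
    def __init__(self, channel=50, k=3.0):
        self.channel = channel
        self.k = k

    def fit(self, X, y=None):
        return self                       # nothing to learn; keeps the sklearn API

    def predict(self, X):
        X = np.asarray(X, dtype=float)
        excess = X[:, self.channel] - X.mean(axis=1)
        return (excess > self.k * X.std(axis=1)).astype(int)
```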
## Results
The performance of the ML model was vastly superior to that of the traditional methods in terms of aROC, as well as every other metric (see below).
```
clf = [(u'ML model', model),
(u'Second difference', classic_algorithms.SecondDifference(channel=50,
fwhm=3,
tol=1)),
(u'Library correlation', classic_algorithms.LibCorNID(channel=50,
sensitivity=0.8,
fwhm=3,
tol=1))]
results = show_results.summary('Peak Classification',
clf,
X,
y,
cv=True,
n_iter=1,
train_sizes=np.linspace(0.05,1.00,50),
n_jobs=3, # avoid hijacking all cpus
learnGraph=False,
rocGraph=True)
pd.DataFrame(results, columns=show_results.columns).set_index("Method")
```
However, the accuracy values of the traditional methods are far lower than their respective aROC values. This indicates that, in contrast to the ML model, the traditional methods' default parameters may need some adjustment.
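A toy example of why this can happen: the ranking metric (aROC) is threshold-free, while accuracy depends on where the decision cutoff sits. The scores below are synthetic, not the notebook's data:
```
import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score

y_true = np.array([0, 0, 0, 1, 1, 1])
scores = np.array([0.55, 0.60, 0.65, 0.70, 0.80, 0.90])   # perfectly ranked, badly calibrated

print('aROC:', roc_auc_score(y_true, scores))                      # 1.0, independent of any threshold
print('accuracy @ 0.50:', accuracy_score(y_true, scores > 0.50))   # 0.5 with the default-style cutoff
print('accuracy @ 0.68:', accuracy_score(y_true, scores > 0.68))   # 1.0 with an adjusted cutoff
```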
## Learning curve
The machine learning model's learning efficiency is also remarkable: it reaches close to its minimum validation error after learning from just a handful of spectra.
```
learning_curve.plot_learning_curve(model, X, y, cv=5, train_sizes=np.linspace(0.05,1,40))
```
Note that the training error rate (blue) is lower than the validation error rate (orange), but both converge to roughly the same value at approximately 100 samples.
## Conclusion
I showed how AI models have excellent differentiation capacity for gamma spectra, being able to surpass traditional methods in the task of detecting radionuclides when the spectrum has a low signal-to-noise ratio.
However, I later found out that the advantage of artificial intelligence models in relation to traditional methods decreases as the signal-to-noise ratio of the spectra increases. From the tests with real spectra (^210^Pb), it is not possible to observe a statistically significant advantage of the AI models over the traditional methods when the signal-to-noise ratio is close to the detection limit.
# Social Web - Facebook
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
import warnings
import random
from datetime import datetime
random.seed(datetime.now())
warnings.filterwarnings('ignore')
# Make plots larger
plt.rcParams['figure.figsize'] = (10, 6)
```
## Facebook API Access
Facebook implements OAuth 2.0 as its standard authentication mechanism.
You need to get an _access token_: log in to your Facebook account and go to https://developers.facebook.com/tools/explorer/ to obtain an ACCESS_TOKEN.
See [http://facebook-sdk.readthedocs.io/en/latest/api.html](http://facebook-sdk.readthedocs.io/en/latest/api.html)
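One way to avoid pasting the raw token into the notebook is to read it from an environment variable (just a handling suggestion; the variable name below is arbitrary):
```
import os

# export FACEBOOK_ACCESS_TOKEN='...' in the shell before launching Jupyter
ACCESS_TOKEN = os.environ.get('FACEBOOK_ACCESS_TOKEN', '')
if not ACCESS_TOKEN:
    print('Set FACEBOOK_ACCESS_TOKEN before running the API cells below.')
```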
```
# ACCESS_TOKEN = ''
ACCESS_TOKEN = 'EAACEdEose0cBsBX7P9vatRMM88wp5H2ZBNmOuZBGcrLsRyZC4YSPA3kI6mB3D2gH3VlZA2s1rBGNZCiN7SPmJolKv7IW4R9FpvtZA6EfIqkY2A94BltOEJ82sQZC55rfIJBU9KC93iSMUBlmmgaJqtJruODLYQJRwZD'
```
Note that an _access token_ expires after a while; you'll see a message like this if you try to use an expired token.
```python
{
"error": {
"message": "Error validating access token: Session has expired on Wednesday, 25-Oct-17 10:00:00 PDT. The current time is Wednesday, 25-Oct-17 18:55:58 PDT.",
"type": "OAuthException",
"code": 190,
"error_subcode": 463,
"fbtrace_id": "CaF9PR122/j"
}
}
```
## Making Graph API requests over HTTP
```
import requests # pip install requests
import json
base_url = 'https://graph.facebook.com/me'
# Specify which fields to retrieve
fields = 'id,name,likes'
url = '{0}?fields={1}&access_token={2}'.format(base_url, fields, ACCESS_TOKEN)
print(url)
content = requests.get(url).json()
print(json.dumps(content, indent=1))
```
## Querying the Graph API with Python
Facebook SDK for Python API reference:
[http://facebook-sdk.readthedocs.io/en/v2.0.0/api.html](http://facebook-sdk.readthedocs.io/en/v2.0.0/api.html)
```
import facebook # pip install facebook-sdk
# Valid API versions are '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7'
# Create a connection to the Graph API with your access token
g = facebook.GraphAPI(ACCESS_TOKEN, version='2.7')
me=g.get_object('me')
print (me)
print (me['id'])
```
**get_connections**
Returns all connections for a given object as a dict.
Parameters
id – A string that is a unique ID for that particular resource.
connection_name - A string that specifies the connection or edge between objects, e.g., feed, friends, groups, likes, posts. If left empty, get_connections will simply return the authenticated user’s basic information.
```
g.get_connections(id=me['id'], connection_name='posts')
g.get_connections(id=me['id'], connection_name='friends')
g.get_connections(id=me['id'], connection_name='feed')
# Get the active user's friends.
friends = g.get_connections(id=me['id'], connection_name='friends')
friends
# Search for a location
# Northeastern University 42.3398° N, 71.0892° W
g.request("search", {'type': 'place', 'center': '42.3398, -71.0892', 'fields': 'name, location'})
# Search for a user
g.request("search", {'q': 'Nik Bear Brown', 'type': 'user'})
# Search for a page
g.request("search", {'q': 'Deep Learning', 'type': 'page'})
# Search for a page
g.request("search", {'q': 'Blake Shelton', 'type': 'page'})
```
## Counting total number of page fans
```
voice=['blakeshelton','MileyCyrus','jenniferhudson','OfficialAdamLevine']
feed = g.get_connections(voice[0], 'posts')
feed
def retrieve_page_feed(page_id, n_posts):
"""Retrieve the first n_posts from a page's feed in reverse
chronological order."""
feed = g.get_connections(page_id, 'posts')
posts = []
posts.extend(feed['data'])
while len(posts) < n_posts:
try:
feed = requests.get(feed['paging']['next']).json()
posts.extend(feed['data'])
except KeyError:
# When there are no more posts in the feed, break
print('Reached end of feed.')
break
if len(posts) > n_posts:
posts = posts[:n_posts]
print('{} items retrieved from feed'.format(len(posts)))
return posts
bs=retrieve_page_feed(voice[0], 33)
bs
bs[0]['id']
def fan_count(page_id):
return int(g.get_object(id=page_id, fields=['fan_count'])['fan_count'])
bs_fc=fan_count(voice[0])
bs_fc
def post_engagement(post_id):
likes = g.get_object(id=post_id,
fields=['likes.limit(0).summary(true)'])\
['likes']['summary']['total_count']
shares = g.get_object(id=post_id,
fields=['shares.limit(0).summary(true)'])\
['shares']['count']
comments = g.get_object(id=post_id,
fields=['comments.limit(0).summary(true)'])\
['comments']['summary']['total_count']
return likes, shares, comments
engagement = post_engagement(bs[0]['id'])
engagement # likes, shares, comments
def relative_engagement(e, total_fans):
a=[]
for i in e:
a.append(i/total_fans)
return a
# Measure the relative share of a page's fans engaging with a post
re=relative_engagement(engagement,bs_fc)
re
```
Last update October 3, 2017
The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT).
# Sign Language
```
from IPython.display import Image
Image("../input/amer_sign2.png")
```
# About the data
The original MNIST image dataset of handwritten digits is a popular benchmark for image-based machine learning methods, but researchers have renewed efforts to update it and develop drop-in replacements that are more challenging for computer vision and closer to real-world applications. As noted for one recent replacement, the Fashion-MNIST dataset, the Zalando researchers quoted the startling claim that "Most pairs of MNIST digits (784 total pixels per sample) can be distinguished pretty well by just one pixel". To stimulate the community to develop more drop-in replacements, the Sign Language MNIST is presented here and follows the same CSV format with labels and pixel values in single rows. The American Sign Language letter database of hand gestures represents a multi-class problem with 24 classes of letters (excluding J and Z, which require motion).
Load the dataset
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_csv('../input/sign_mnist_train.csv')
test = pd.read_csv('../input/sign_mnist_test.csv')
train.head()
train.shape
```
The dataset is given in the form of a label and pixel values ranging from pixel 1 to pixel 784, which corresponds to a 28 × 28 image.
Let's see what each sign means
```
Image("../input/american_sign_language.PNG")
```
Each letter corresponds to a sign produced by our fingers. We will apply deep learning to these images so that our model learns which sign indicates which letter.
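For inspecting predictions later, a small lookup from label index to letter can help. This assumes the dataset's usual convention of labels 0 to 25 indexing A to Z, with J (9) and Z (25) never occurring:
```
import string

# Assumed convention: label i corresponds to string.ascii_uppercase[i]; J and Z are absent.
label_to_letter = {i: letter for i, letter in enumerate(string.ascii_uppercase)
                   if letter not in ('J', 'Z')}
print(label_to_letter[0], label_to_letter[24])   # 'A', 'Y'
```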
```
labels = train['label'].values
unique_val = np.array(labels)
np.unique(unique_val)
```
# Data exploration
```
plt.figure(figsize = (18,8))
sns.countplot(x =labels)
```
As you can see, the classes are almost equally distributed
```
train.drop('label', axis = 1, inplace = True)
```
We are dropping the label column from the training set
Reshaping the images
```
images = train.values
images = np.array([np.reshape(i, (28, 28)) for i in images])
images = np.array([i.flatten() for i in images])
```
Since our target variable is categorical (nominal), we use a label binarizer
```
from sklearn.preprocessing import LabelBinarizer
label_binrizer = LabelBinarizer()
labels = label_binrizer.fit_transform(labels)
labels
```
Let's see how the images look
```
plt.imshow(images[0].reshape(28,28))
```
Splitting the dataset into train (70%) and test (30%)
```
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size = 0.3, random_state = 101)
```
For deep learning I am using the Keras library
```
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
```
Setting the batch size to 128 and training for 50 epochs
```
batch_size = 128
num_classes = 24
epochs = 50
```
Normalizing the training and test data
```
x_train = x_train / 255
x_test = x_test / 255
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
```
Visualizing the image after normalizing
```
plt.imshow(x_train[0].reshape(28,28))
```
# CNN Model
```
model = Sequential()
model.add(Conv2D(64, kernel_size=(3,3), activation = 'relu', input_shape=(28, 28 ,1) ))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Flatten())
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.20))
model.add(Dense(num_classes, activation = 'softmax'))
model.compile(loss = keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
history = model.fit(x_train, y_train, validation_data = (x_test, y_test), epochs=epochs, batch_size=batch_size)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title("Accuracy")
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['train','test'])
plt.show()
```
As you can see, as the number of epochs increases, the accuracy also increases.
Let's validate with the test data
```
test_labels = test['label']
test.drop('label', axis = 1, inplace = True)
test_images = test.values
test_images = np.array([np.reshape(i, (28, 28)) for i in test_images])
test_images = np.array([i.flatten() for i in test_images])
test_labels = label_binrizer.fit_transform(test_labels)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
test_images.shape
```
Predicting with the test images
```
y_pred = model.predict(test_images)
from sklearn.metrics import accuracy_score
accuracy_score(test_labels, y_pred.round())
```
As we can see, we got really great accuracy.
We can increase the accuracy further by tuning the model's hyperparameters, for example by trying different activation functions and loss functions.
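As a sketch of that kind of experiment (an untested variant, not a recommended configuration), one could swap the optimizer and the dense-layer activation and add light dropout after each pooling block:
```
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout

num_classes = 24          # same as above
model2 = Sequential()
model2.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.10))
model2.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.10))
model2.add(Flatten())
model2.add(Dense(128, activation='tanh'))            # a different dense activation to try
model2.add(Dense(num_classes, activation='softmax'))
model2.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
```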
# Dataset Generation
## Includes
```
# mass includes
import os
import pickle
import pyexiv2 as exiv2
import rawpy as rp
import numpy as np
import torch as t
from rawpy import HighlightMode
from tqdm.notebook import tqdm
from torch.utils import data
```
## Initialization
```
# configuration
data_root = '/home/lab/Documents/ssd/DJI' # dataset path
save_root = '/home/lab/Documents/ssd/r2rSet' # save path
file_ext = '.DNG' # extension of raw file
train_num = 710 # num of images for training
patch_size = (400, 300) # size of each patch
```
## RAW data manipulation
```
# get file list
file_list = [file for file in os.listdir(data_root) if file_ext in file]
file_list.sort()
# make new folders
train_path = os.path.join(save_root, 'train')
os.makedirs(train_path)
val_path = os.path.join(save_root, 'val')
os.makedirs(val_path)
for index, file in tqdm(enumerate(file_list),
desc='progress',
total=len(file_list)):
# find black, saturation, and whitebalance
img_md = exiv2.ImageMetadata(os.path.join(data_root, file))
img_md.read()
blk_level = img_md['Exif.SubImage1.BlackLevel'].value
sat_level = img_md['Exif.SubImage1.WhiteLevel'].value
cam_wb = img_md['Exif.Image.AsShotNeutral'].value
# convert flat Bayer pattern to 4D tensor (RGGB)
raw_img = rp.imread(os.path.join(data_root, file))
flat_bayer = raw_img.raw_image_visible
raw_data = np.stack((flat_bayer[0::2, 0::2], flat_bayer[0::2, 1::2],
flat_bayer[1::2, 0::2], flat_bayer[1::2, 1::2]),
axis=2)
# get ground-truth sRGB image
gt_img = raw_img.postprocess(use_camera_wb=True,
output_bps=16,
no_auto_bright=True,
adjust_maximum_thr=0.0,
highlight_mode=HighlightMode.Ignore)
# split to small patches
part_idx = 0
raw_hei = gt_img.shape[0] / 2
raw_wid = gt_img.shape[1] / 2
for i in range(0, int(raw_hei / patch_size[1])):
for j in range(0, int(raw_wid / patch_size[0])):
crop_h = i * patch_size[1]
crop_w = j * patch_size[0]
raw_patch = raw_data[crop_h:crop_h + patch_size[1],
crop_w:crop_w + patch_size[0], :]
gt_patch = gt_img[2 * crop_h:2 * (crop_h + patch_size[1]),
2 * crop_w:2 * (crop_w + patch_size[0]), :]
# save to files
patch = {}
patch['blk_level'] = np.array(blk_level, dtype=np.uint16)
patch['sat_level'] = np.array(sat_level, dtype=np.uint16)
patch['cam_wb'] = np.array(cam_wb, dtype=np.float32)
patch['raw'] = np.transpose(raw_patch, (2, 0, 1))
patch['img'] = np.transpose(gt_patch, (2, 0, 1))
if index < train_num:
file_path = os.path.join(
train_path, '%s_p%03d.pkl' % (file[:-4], part_idx))
else:
file_path = os.path.join(
val_path, '%s_p%03d.pkl' % (file[:-4], part_idx))
with open(file_path, 'wb') as pkl_file:
pickle.dump(patch, pkl_file)
# update part index
part_idx += 1
```
```
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
import re
import time
import collections
import os
def build_dataset(words, n_words, atleast=1):
count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
lines = open('movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
conv_lines = open('movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
id2line = {}
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = [ ]
for line in conv_lines[:-1]:
_line = line.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
convs.append(_line.split(','))
questions = []
answers = []
for conv in convs:
for i in range(len(conv)-1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i+1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return ' '.join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
concat_from = ' '.join(short_questions+question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[4:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print('filtered vocab size:',len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from)/vocabulary_size_from,4)*100))
concat_to = ' '.join(short_answers+answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab from size: %d'%(vocabulary_size_to))
print('Most common words', count_to[4:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print('filtered vocab size:',len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to)/vocabulary_size_to,4)*100))
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
for i in range(len(short_answers)):
short_answers[i] += ' EOS'
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
ints.append(dic.get(k,UNK))
X.append(ints)
return X
def pad_sentence_batch(sentence_batch, pad_int, maxlen):
padded_seqs = []
seq_lens = []
max_sentence_len = maxlen
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(maxlen)
return padded_seqs, seq_lens
def check_accuracy(logits, Y):
acc = 0
for i in range(logits.shape[0]):
internal_acc = 0
count = 0
for k in range(len(Y[i])):
try:
if Y[i][k] == logits[i][k]:
internal_acc += 1
count += 1
if Y[i][k] == EOS:
break
except:
break
acc += (internal_acc / count)
return acc / logits.shape[0]
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_from)
maxlen_question = max([len(x) for x in X]) * 2
maxlen_answer = max([len(y) for y in Y]) * 2
def hop_forward(memory_o, memory_i, response_proj, inputs_len, questions_len):
match = memory_i
match = pre_softmax_masking(match, inputs_len)
match = tf.nn.softmax(match)
match = post_softmax_masking(match, questions_len)
response = tf.multiply(match, memory_o)
return response_proj(response)
def pre_softmax_masking(x, seq_len):
paddings = tf.fill(tf.shape(x), float('-inf'))
T = tf.shape(x)[1]
max_seq_len = tf.shape(x)[2]
masks = tf.sequence_mask(seq_len, max_seq_len, dtype = tf.float32)
masks = tf.tile(tf.expand_dims(masks, 1), [1, T, 1])
return tf.where(tf.equal(masks, 0), paddings, x)
def post_softmax_masking(x, seq_len):
T = tf.shape(x)[2]
max_seq_len = tf.shape(x)[1]
masks = tf.sequence_mask(seq_len, max_seq_len, dtype = tf.float32)
masks = tf.tile(tf.expand_dims(masks, -1), [1, 1, T])
return x * masks
def shift_right(x):
batch_size = tf.shape(x)[0]
start = tf.to_int32(tf.fill([batch_size, 1], GO))
return tf.concat([start, x[:, :-1]], 1)
def embed_seq(x, vocab_size, zero_pad = True):
lookup_table = tf.get_variable(
'lookup_table', [vocab_size, size_layer], tf.float32
)
if zero_pad:
lookup_table = tf.concat(
(tf.zeros([1, size_layer]), lookup_table[1:, :]), axis = 0
)
return tf.nn.embedding_lookup(lookup_table, x)
def position_encoding(sentence_size, embedding_size):
encoding = np.ones((embedding_size, sentence_size), dtype = np.float32)
ls = sentence_size + 1
le = embedding_size + 1
for i in range(1, le):
for j in range(1, ls):
encoding[i - 1, j - 1] = (i - (le - 1) / 2) * (j - (ls - 1) / 2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
def quest_mem(x, vocab_size, max_quest_len):
x = embed_seq(x, vocab_size)
pos = position_encoding(max_quest_len, size_layer)
return x * pos
class QA:
def __init__(self, vocab_size_from, vocab_size_to, size_layer, learning_rate, n_hops = 3):
self.X = tf.placeholder(tf.int32,[None,None])
self.Y = tf.placeholder(tf.int32,[None,None])
self.X_seq_len = tf.placeholder(tf.int32, [None])
self.Y_seq_len = tf.placeholder(tf.int32, [None])
max_quest_len = maxlen_question
max_answer_len = maxlen_answer
lookup_table = tf.get_variable('lookup_table', [vocab_size_from, size_layer], tf.float32)
with tf.variable_scope('memory_o'):
memory_o = quest_mem(self.X, vocab_size_from, max_quest_len)
with tf.variable_scope('memory_i'):
memory_i = quest_mem(self.X, vocab_size_from, max_quest_len)
with tf.variable_scope('interaction'):
response_proj = tf.layers.Dense(size_layer)
for _ in range(n_hops):
answer = hop_forward(memory_o,
memory_i,
response_proj,
self.X_seq_len,
self.X_seq_len)
memory_i = answer
embedding = tf.Variable(tf.random_uniform([vocab_size_to, size_layer], -1, 1))
cell = tf.nn.rnn_cell.BasicRNNCell(size_layer)
vocab_proj = tf.layers.Dense(vocab_size_to)
state_proj = tf.layers.Dense(size_layer)
init_state = state_proj(tf.layers.flatten(answer))
helper = tf.contrib.seq2seq.TrainingHelper(
inputs = tf.nn.embedding_lookup(embedding, shift_right(self.Y)),
sequence_length = tf.to_int32(self.Y_seq_len))
decoder = tf.contrib.seq2seq.BasicDecoder(cell = cell,
helper = helper,
initial_state = init_state,
output_layer = vocab_proj)
decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder = decoder,
maximum_iterations = max_answer_len)
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding = embedding,
start_tokens = tf.tile(
tf.constant([GO],
dtype=tf.int32),
[tf.shape(init_state)[0]]),
end_token = EOS)
decoder = tf.contrib.seq2seq.BasicDecoder(
cell = cell,
helper = helper,
initial_state = init_state,
output_layer = vocab_proj)
predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = decoder,
maximum_iterations = max_answer_len)
self.training_logits = decoder_output.rnn_output
self.predicting_ids = predicting_decoder_output.sample_id
self.logits = decoder_output.sample_id
masks = tf.sequence_mask(self.Y_seq_len, max_answer_len, dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
targets = self.Y,
weights = masks)
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
epoch = 20
batch_size = 64
size_layer = 256
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = QA(len(dictionary_from), len(dictionary_to), size_layer, 1e-3)
sess.run(tf.global_variables_initializer())
for i in range(epoch):
total_loss, total_accuracy = 0, 0
for k in range(0, (len(short_questions) // batch_size) * batch_size, batch_size):
batch_x, seq_x = pad_sentence_batch(X[k: k+batch_size], PAD, maxlen_question)
batch_y, seq_y = pad_sentence_batch(Y[k: k+batch_size], PAD, maxlen_answer)
predicted, loss, _ = sess.run([model.logits, model.cost, model.optimizer],
feed_dict={model.X:batch_x,
model.Y:batch_y,
model.X_seq_len:seq_x,
model.Y_seq_len:seq_y})
total_loss += loss
total_accuracy += check_accuracy(predicted,batch_y)
total_loss /= (len(short_questions) // batch_size)
total_accuracy /= (len(short_questions) // batch_size)
print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))
```
Moving Averages, Part 2
I'm 格子衫小C (WeChat official account: 格子衫小C). Starting today, we will study moving-average indicators together.
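As a reminder of what the indicator itself computes, an n-day simple moving average (SMA) is just the rolling mean of closing prices; here is a minimal pandas sketch on synthetic prices (not data pulled through trdb2):
```
import numpy as np
import pandas as pd

close = pd.Series(np.cumsum(np.random.randn(300)) + 100)   # synthetic closing prices
sma = pd.DataFrame({'close': close,
                    'sma5': close.rolling(5).mean(),
                    'sma30': close.rolling(30).mean()})
print(sma.tail())
```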
```
import pandas as pd
from datetime import datetime
import trdb2py
isStaticImg = False
width = 960
height = 768
pd.options.display.max_columns = None
pd.options.display.max_rows = None
trdb2cfg = trdb2py.loadConfig('./trdb2.yaml')
# specific fund
# asset = 'jrj.510310'
asset = 'jqdata.000300_XSHG|1d'
# start time; 0 means start from the very beginning of the data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# end time; -1 means up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
# initial cash pool
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
# buy parameters: buy with all available money (i.e. compound returns)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
# sell parameters: sell the entire position
paramssell = trdb2py.trading2_pb2.SellParams(
perVolume=1,
)
asset = 'jqdata.513100_XSHG|1d'
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='纳斯达克指数',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
ret = trdb2py.getAssetCandles2(trdb2cfg, asset, tsStart, tsEnd, indicators=['sma.2', 'sma.5', 'sma.30', 'sma.60', 'sma.120', 'sma.240'])
# print(ret)
# ret
trdb2py.showAssetCandles2('纳斯达克指数', ret, indicators=['sma.2', 'sma.5', 'sma.30', 'sma.60', 'sma.120', 'sma.240'], toImg=isStaticImg, width=width, height=height)
```
Next, let's test the moving averages.
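The rule tested below (buy when the close crosses above its n-day SMA, sell when it crosses back below) can be written out in pandas for intuition. This is only an illustration; the actual backtest runs through trdb2's `indicatorsp` conditions:
```
import numpy as np
import pandas as pd

close = pd.Series(np.cumsum(np.random.randn(500)) + 100)   # synthetic closing prices
sma = close.rolling(60).mean()
above = close > sma
upcross = above & ~above.shift(1, fill_value=False)        # buy signal
downcross = ~above & above.shift(1, fill_value=False)      # sell signal
print('buy signals:', int(upcross.sum()), 'sell signals:', int(downcross.sum()))
```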
```
lstparams = []
asset = 'jqdata.513100_XSHG|1d'
# start time; 0 means start from the very beginning of the data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# end time; -1 means up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
for ema in range(2, 561):
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=4)
trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaseline])
dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
dfpnl1[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
lstparams = []
lstems = [379]
asset = 'jqdata.513100_XSHG|1d'
# start time; 0 means start from the very beginning of the data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# end time; -1 means up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
for ema in lstems:
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix1 = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=2.3)
sts = trdb2py.getFirstCtrlTs(lstpnlmix1[0])
trdb2py.showPNLs([lstpnlmix1[0], pnlBaseline], toImg=isStaticImg, width=width, height=height, startTs=sts)
asset = 'jqdata.513500_XSHG|1d'
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='baseline',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
asset = 'jqdata.513500_XSHG|1d'
# asset = 'jrj.096001'
lstparams = []
for ema in range(2, 241):
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=2.3)
trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaseline])
# dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
asset = 'jqdata.513600_XSHG|1d'
# start time; 0 means start from the very beginning of the data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# end time; -1 means up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='baseline',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
asset = 'jqdata.513600_XSHG|1d'
# asset = 'jrj.096001'
# start time; 0 means start from the very beginning of the data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# end time; -1 means up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
lstparams = []
for ema in range(2, 241):
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=2.3)
trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaseline])
# dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
asset = 'jqdata.000032_XSHG|1d'
# start time; 0 means start from the very beginning of the data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2010-11-01', '%Y-%m-%d'))
# end time; -1 means up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='上证能源',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
asset = 'jqdata.000032_XSHG|1d'
# asset = 'jrj.096001'
lstparams = []
for ema in range(2, 241):
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=3.5)
trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaseline])
# dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
asset = 'jqdata.000036_XSHG|1d'
# Start timestamp; 0 means start from the earliest available data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# End timestamp; -1 means run up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='上证消费',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
asset = 'jqdata.000036_XSHG|1d'
# asset = 'jrj.096001'
lstparams = []
for ema in range(2, 241):
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=4)
trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaseline])
# dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
lstparams = []
lstems = [240]
# Start timestamp; 0 means start from the earliest available data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# End timestamp; -1 means run up to the present
# tsEnd = -1
tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
for ema in lstems:
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix1 = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=2.3)
sts = trdb2py.getFirstCtrlTs(lstpnlmix1[0])
trdb2py.showPNLs([lstpnlmix1[0], pnlBaseline], toImg=isStaticImg, width=width, height=height, startTs=sts)
# lstems = [2, 42, 60]
# Start timestamp; 0 means start from the earliest available data
tsStart = 0
# tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# End timestamp; -1 means run up to the present
tsEnd = -1
# tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='中证消费指数',
)
pnlBaselineF = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaselineF, toImg=isStaticImg, width=width, height=height)
lstparams = []
lstems = [203, 71]
# Start timestamp; 0 means start from the earliest available data
tsStart = 0
# tsStart = int(trdb2py.str2timestamp('2013-05-01', '%Y-%m-%d'))
# End timestamp; -1 means run up to the present
tsEnd = -1
# tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
for ema in lstems:
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['upcross'],
strVals=['ta-sma.{}'.format(ema)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='waittostart',
vals=[ema],
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['downcross'],
strVals=['ta-sma.{}'.format(ema)],
)
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
s0.buy.extend([buy0, buy1])
s0.sell.extend([sell0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='sma.{}'.format(ema),
))
lstpnlmix = trdb2py.simTradings(trdb2cfg, lstparams, ignoreTotalReturn=2.3)
trdb2py.showPNLs(lstpnlmix + [pnlBaselineF], toImg=isStaticImg, width=width, height=height)
dfpnl = trdb2py.buildPNLReport(lstpnlmix + [pnlBaselineF])
# dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
```
# Table of Contents
<p><div class="lev1"><a href="#Bayes-Rule"><span class="toc-item-num">1 </span>Bayes Rule</a></div><div class="lev2"><a href="#Quiz:-Cancer-Test"><span class="toc-item-num">1.1 </span>Quiz: Cancer Test</a></div><div class="lev2"><a href="#Quiz:-Prior-and-Posterior"><span class="toc-item-num">1.2 </span>Quiz: Prior and Posterior</a></div><div class="lev2"><a href="#Quiz:-Normalizing"><span class="toc-item-num">1.3 </span>Quiz: Normalizing</a></div><div class="lev2"><a href="#Quiz:-Total-Probability"><span class="toc-item-num">1.4 </span>Quiz: Total Probability</a></div><div class="lev1"><a href="#Bayes-Rule-Diagram"><span class="toc-item-num">2 </span>Bayes Rule Diagram</a></div><div class="lev1"><a href="#Equivalent-Diagram"><span class="toc-item-num">3 </span>Equivalent Diagram</a></div><div class="lev2"><a href="#Quiz:-Cancer-Probabilities"><span class="toc-item-num">3.1 </span>Quiz: Cancer Probabilities</a></div><div class="lev2"><a href="#Quiz:-Probability-Given-Test"><span class="toc-item-num">3.2 </span>Quiz: Probability Given Test</a></div><div class="lev2"><a href="#Quiz:-Normalizer"><span class="toc-item-num">3.3 </span>Quiz: Normalizer</a></div><div class="lev2"><a href="#Quiz:-Normalizaing-Probability"><span class="toc-item-num">3.4 </span>Quiz: Normalizaing Probability</a></div><div class="lev2"><a href="#Quiz:-Disease-Test-1"><span class="toc-item-num">3.5 </span>Quiz: Disease Test 1</a></div><div class="lev2"><a href="#Quiz:-Disease-Test-2"><span class="toc-item-num">3.6 </span>Quiz: Disease Test 2</a></div><div class="lev2"><a href="#Quiz:-Disease-Test-3"><span class="toc-item-num">3.7 </span>Quiz: Disease Test 3</a></div><div class="lev2"><a href="#Quiz:-Disease-Test-4"><span class="toc-item-num">3.8 </span>Quiz: Disease Test 4</a></div><div class="lev2"><a href="#Quiz:-Disease-Test-5"><span class="toc-item-num">3.9 </span>Quiz: Disease Test 5</a></div><div class="lev2"><a href="#Quiz:-Disease-Test-6"><span class="toc-item-num">3.10 </span>Quiz: Disease Test 6</a></div><div class="lev1"><a href="#Bayes-Rule-Summary"><span class="toc-item-num">4 </span>Bayes Rule Summary</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-1"><span class="toc-item-num">4.1 </span>Quiz: Robot Sensing 1</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-2"><span class="toc-item-num">4.2 </span>Quiz: Robot Sensing 2</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-3"><span class="toc-item-num">4.3 </span>Quiz: Robot Sensing 3</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-4"><span class="toc-item-num">4.4 </span>Quiz: Robot Sensing 4</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-5"><span class="toc-item-num">4.5 </span>Quiz: Robot Sensing 5</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-6"><span class="toc-item-num">4.6 </span>Quiz: Robot Sensing 6</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-7"><span class="toc-item-num">4.7 </span>Quiz: Robot Sensing 7</a></div><div class="lev2"><a href="#Quiz:-Robot-Sensing-8"><span class="toc-item-num">4.8 </span>Quiz: Robot Sensing 8</a></div><div class="lev1"><a href="#Generalizing"><span class="toc-item-num">5 </span>Generalizing</a></div><div class="lev2"><a href="#Quiz:-Sebastian-At-Home"><span class="toc-item-num">5.1 </span>Quiz: Sebastian At Home</a></div>
# Bayes Rule
We're going to talk about perhaps the holy grail of probabilistic inference. It's called Bayes Rule. Bayes Rule is based on Reverend Thomas Bayes, who used this principle to infer the existence of God, but in doing so, he created a new family of methods that has vastly influenced artificial intelligence and statistics.
## Quiz: Cancer Test
- Let's use the cancer example from my last unit. There's a specific cancer that occurs in 1% of the population, and there's a test for this cancer that comes back positive with 90% chance if the person has the cancer, C. That's usually called the sensitivity.
- But the test is sometimes positive even if you don't have C. Let's say with another 90% chance it's negative if you don't have C. That's usually called the specificity.
- So here's my question. Without further symptoms, you take the test, and the test comes back positive. What do you think is the probability of having that specific type of cancer?
- To answer this, let's draw a diagram. Suppose these are all of the people, and some of them, exactly 1%, have cancer; 99% are cancer free. We know there's a test that, if you have cancer, correctly diagnoses it with 90% chance. So if we draw the area where the test is positive, cancer and test positive, then this area over here is 90% of the cancer circle. However, this isn't the full truth.
- The test also comes out positive even if the person doesn't have cancer. In fact, in our case, that happens in 10% of all cases. So we have to add more area: 10% of the large cancer-free region is where the test comes back positive even though the person doesn't have cancer. So this blue area is 10% of all the area over here minus the small cancer circle. And clearly, all the area outside these circles corresponds to a situation of no cancer and a negative test.
- So let me ask you again. Suppose you have a positive test; what do you think? With a prior probability of cancer of 1% and a sensitivity and specificity of 90%, do you think your new chances are now 90%, 8%, or still just 1%?
- The question being asked is this: 1% of the population has cancer. Given that there is a 90% chance that you will test positive if you have cancer and that there is a 90% chance you will test negative if you don't have cancer, what is the probability that you have cancer if you test positive?
<img src="images/Screen Shot 2016-05-01 at 2.10.58 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487590180923)*
<!--TEASER_END-->
**Answer**
- And I would argue it's about 8%. In fact, as we will see, it comes out at 8 1/3% mathematically. And the way to see this in the diagram is that this is the region that tests positive. By having a positive test, you know you're in this region, and nothing else matters.
- You know you're in this circle. But within this circle, the ratio of the cancerous region to the entire region is still pretty small. Obviously, having a positive test changes your cancer probability, but it only increases it by a factor of about 8, as we will see in a second.
<img src="images/Screen Shot 2016-05-01 at 2.14.02 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487590180923)*
<!--TEASER_END-->
## Quiz: Prior and Posterior
- So this is the essence of Bayes Rule, which I'll give to you in a second. There's some sort of a prior, by which we mean the probability before you run a test, and then you get some evidence from the test itself, and that all leads you to what's called a posterior probability.
- Now this is not really a plus operation. In fact, in reality, it's more like a multiplication, but semantically, what Bayes Rule does is incorporate some evidence from the test into your prior probability to arrive at a posterior probability.
- So let's make this specific. In our cancer example, we know that the prior probability of cancer is 0.01, which is the same as 1%. The posterior probability of cancer given that our test is positive, abbreviated here as Pos, is the product of the prior times our test sensitivity, which is the chance of a positive result given that I have cancer.
- And you might remember, this was 0.9, or 90%. Now just to warn you, this isn't quite correct yet. To make this correct, we also have to compute the posterior for the non-cancer option, that is, no cancer given a positive test. And using the prior, we know that P of not C is 0.99; it's 1 minus P of C, times the probability of getting a positive test result given not C.
- Realize these 2 equations are the same, but I exchanged C for not C. And this one over here takes a moment to compute. We know that our test gives us a negative result in the cancer-free case with 0.9 chance. As a result, it gives us a positive result in the cancer-free case with 10% chance.
- Now what's interesting is that this is about the correct equation, except the probabilities don't add up to 1. To see this, I'm going to ask you to compute them, so please give me the exact numbers for the first expression and the second expression written over here, using our example up there.
<img src="images/Screen Shot 2016-05-01 at 2.40.19 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487236370923)*
<!--TEASER_END-->
**Answer**
- Obviously, P(C) x P(Pos|C) is 0.01 x 0.9 = 0.009, whereas 0.99 x 0.1, this one over here, is 0.099.
- What we've computed here is the absolute area in here, which is 0.009, and the absolute area in here, which is 0.099.
<img src="images/Screen Shot 2016-05-01 at 2.42.46 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487236370923)*
<!--TEASER_END-->
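As a quick numerical check, here is a minimal Python sketch, not part of the lecture, of the two joint probabilities just computed:
```
# joint probabilities for a positive test result
p_c, sens, spec = 0.01, 0.9, 0.9
joint_cancer = p_c * sens                  # P(C, Pos)     = 0.009
joint_no_cancer = (1 - p_c) * (1 - spec)   # P(not C, Pos) = 0.099
print(joint_cancer, joint_no_cancer)
```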
## Quiz: Normalizing
The normalization proceeds in two steps. We normalize these two values to keep their ratio the same while making sure they add up to 1. So let's first compute the sum of these two values.
**Answer**
- And, yes, the answer is 0.108. Technically, what this really means is the probability of a positive test result-- that's the area in the circle that I just marked. By virtue of what we learned last, it's just the sum of two things over here, which gives us 0.108.
<img src="images/Screen Shot 2016-05-01 at 2.46.08 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484529790923)*
<!--TEASER_END-->
And now, finally, we come up with the actual posterior, whereas the one over here is often called the joint probability of the two events. The posterior is obtained by dividing this value by the normalizer. So let's do this over here--let's divide this value by the normalizer to get my posterior probability of having cancer given that I received the positive test result.
**Answer**
- The answer is 0.0833.
<img src="images/Screen Shot 2016-05-01 at 2.50.07 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/482999910923)*
<!--TEASER_END-->
Let's do the same for the non-cancer version, pick the number over here to divide and divide it by this same normalizer.
**Answer**
- The answer is 0.9167 approximately.
<img src="images/Screen Shot 2016-05-01 at 2.52.20 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487441150923)*
<!--TEASER_END-->
## Quiz: Total Probability
Why don't you for a second add these two numbers and give me the result?
- The answer is 1
<img src="images/Screen Shot 2016-05-01 at 2.53.47 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487093190923)*
<!--TEASER_END-->
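Continuing that sketch (again my own code, not from the lecture), normalizing the two joints reproduces the normalizer and both posteriors from the last few quizzes:
```
joint_cancer, joint_no_cancer = 0.009, 0.099
normalizer = joint_cancer + joint_no_cancer     # P(Pos) = 0.108
post_cancer = joint_cancer / normalizer         # ~0.0833
post_no_cancer = joint_no_cancer / normalizer   # ~0.9167
print(normalizer, post_cancer, post_no_cancer)
print(post_cancer + post_no_cancer)             # sums to 1 (up to floating-point rounding)
```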
# Bayes Rule Diagram
- Well, we really said that we had a situation with a prior P(C), a test with a certain sensitivity P(Pos|C), and a certain specificity P(Neg|¬C). When you receive, say, a positive test result, what you do is take your prior P(C) and multiply in the probability of this test result given C, and likewise take P(¬C) and multiply in the probability of the test result given ¬C.
- So, this is your branch for the consideration that you have cancer, and this is your branch for the consideration of no cancer. When you're done with this, you arrive at numbers that combine the cancer hypothesis with the test result, and the no-cancer hypothesis with the test result. Now, what you do is add those up; they normally don't add up to one. You get a certain quantity which happens to be the total probability that the test result is what it was, in this case positive.
- And all you do next is divide, or normalize, this thing over here by the sum over here, and the same on the right side. The divider is the same for both cases, because this is your cancer branch and this is your non-cancer branch, but the normalizer does not depend on the cancer variable anymore. What you get out is the desired posterior probability, and those add up to 1 if you did everything correctly, as shown over here. This is the algorithm for Bayes Rule.
<img src="images/Screen Shot 2016-05-01 at 4.32.27 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487092840923)*
<!--TEASER_END-->
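The whole multiply, sum, normalize procedure fits in a small helper function. This is only a sketch of the algorithm described above; the function name and signature are my own, checked here against the cancer example:
```
def bayes_posterior(prior, p_obs_given_h, p_obs_given_not_h):
    """Multiply, sum, normalize: returns (P(H | obs), P(not H | obs))."""
    joint_h = prior * p_obs_given_h                  # hypothesis branch
    joint_not_h = (1 - prior) * p_obs_given_not_h    # complement branch
    normalizer = joint_h + joint_not_h               # total probability of the observation
    return joint_h / normalizer, joint_not_h / normalizer

# positive cancer test: prior 0.01, P(Pos|C) = 0.9, P(Pos|not C) = 0.1
print(bayes_posterior(0.01, 0.9, 0.1))   # ~(0.0833, 0.9167)
```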
# Equivalent Diagram
Now, the same algorithm works if your test says negative. Suppose your test result says negative. You could still ask the same question:
- Now, what's my probability of having cancer or not? But now all the positives in here become negatives.
- The sum is the total probability of a negative test result, and if we now divide by this quantity, we get the posterior probabilities for cancer and non-cancer assuming a negative test result, which of course turn out to be much, much more favorable for you, because none of us wants to have cancer.
- So, look at this for a while, and let's now do the calculation for the negative case using the same numbers I gave you before, step by step this time around, so I can really guide you through the process.
<img src="images/Screen Shot 2016-05-02 at 7.03.41 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484805600923)*
<!--TEASER_END-->
## Quiz: Cancer Probabilities
We begin with our prior probability, our sensitivity, and our specificity, and I want you to begin by filling in all the missing values. So, there's the probability of no cancer, the probability of a negative result given C, and the probability of a positive result given not C.
**Answer**
- And obviously these are still 0.99 as before, 0.1, and 0.1. I hope you got this correct.
<img src="images/Screen Shot 2016-05-02 at 7.06.01 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484806070923)*
<!--TEASER_END-->
## Quiz: Probability Given Test
Now assume the test comes back negative, the same logic applies as before. So please give me the combined probability of cancer given the negative test result and the combined probability of being cancer-free given the negative test result.
**Answer**
- The number here is 0.001 and it's the product of my prior for cancer which is 0.01, and the probability of getting a negative result in the case of cancer which is right over here, 0.1. If I multiply these two things together, I get **0.001**.
- The probability here is 0.891. What I'm multiplying is the prior probability of not having cancer, which is 0.99, with the probability of seeing a negative result in the case of not having cancer, and that is the one right over here, 0.9. So, multiplying 0.99 by 0.9, I get **0.891**.
<img src="images/Screen Shot 2016-05-02 at 7.12.06 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487093200923)*
<!--TEASER_END-->
## Quiz: Normalizer
Let's compute the normalizer. You now remember what this was.
**Answer**
- And the answer is 0.892. You just add up these two values over here.
<img src="images/Screen Shot 2016-05-02 at 7.13.50 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487236380923)*
<!--TEASER_END-->
## Quiz: Normalizaing Probability
Now, finally, tell me the posterior probability of cancer given that we had a negative test result, and the probability of no cancer given that same negative test result.
**Answer**
- This is approximately 0.0011, which we get by dividing 0.001 by the normalizer 0.892, and the posterior probability of being cancer-free after the test is approximately 0.9989, and that's obtained by dividing this probability over here by the normalizer and not surprisingly, these two values indeed add up to 1.
- Now, what's remarkable about this outcome is really what it means. Before the test, we had a 1% chance of having cancer; now, we have about a 0.11% chance of having cancer. So, our cancer probability went down by about a factor of 9.
- So, the test really helped us gain confidence that we are cancer-free. Conversely, before we had a 99% chance of being cancer free; now it's 99.89%. So, all the numbers work exactly how we expect them to work.
<img src="images/Screen Shot 2016-05-02 at 7.18.27 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484529800923)*
<!--TEASER_END-->
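The same steps with the negative-result likelihoods reproduce the numbers above; again this is just a sketch, not lecture code:
```
# negative test: prior 0.01, P(Neg|C) = 0.1, P(Neg|not C) = 0.9
joint_c = 0.01 * 0.1       # 0.001
joint_not_c = 0.99 * 0.9   # 0.891
norm = joint_c + joint_not_c            # 0.892
print(joint_c / norm, joint_not_c / norm)   # ~0.0011, ~0.9989
```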
## Quiz: Disease Test 1
Let me now make your life harder. Suppose the prior probability of a certain other kind of disease is 0.1, so 10% of the population has it. Our test in the positive case is really informative (0.9), but there's only a 0.5 chance that if I'm disease-free the test indeed says so. So the sensitivity is high, but the specificity is lower. Let's start by filling in the first 3 values.
**Answer**
- Obviously, these are just 1 minus those: 0.9, 0.1, and 0.5.
<img src="images/Screen Shot 2016-05-02 at 7.25.01 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487441160923)*
<!--TEASER_END-->
## Quiz: Disease Test 2
What is P(C, Neg)?
**Answer**
- And the answer is 0.01.
- P(C) = 0.1, and P(Neg│C) is also 0.1, so if you multiply those two they are 0.01.
<img src="images/Screen Shot 2016-05-02 at 7.27.46 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487043450923)*
<!--TEASER_END-->
## Quiz: Disease Test 3
And what's the same for P(¬C, Neg).
**Answer**
- And the answer is 0.45.
- P(¬C) is 0.9, and P(Neg│¬C) is 0.5. So 0.9 * 0.5 = 0.45.
<img src="images/Screen Shot 2016-05-02 at 7.32.10 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487550520923)*
<!--TEASER_END-->
## Quiz: Disease Test 4
What is P(Neg)?
**Answer**
- Well, you just add up these two numbers to get 0.46.
<img src="images/Screen Shot 2016-05-02 at 7.37.26 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/483118800923)*
<!--TEASER_END-->
## Quiz: Disease Test 5
So tell me what the final two numbers are.
**Answer**
- The first one is 0.01 divided by the normalizer 0.46, which gives us 0.0217, and the second one is 0.45 divided by 0.46, which gives us 0.9783.
- These are the correct posteriors. We started with a 10% chance of having it; we got a negative result, and we're now down to about a 2% chance.
<img src="images/Screen Shot 2016-05-02 at 9.02.52 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484806080923)*
<!--TEASER_END-->
## Quiz: Disease Test 6
Let's now consider the case that the test result is positive, and I want you to just give me the two numbers over here and not the other ones.
**Answer**
- So once again, we have 0.9, 0.1, and 0.5 over here.
- Very quickly, multiplying this one with this one over here gives 0.09, and this one with this one over here gives 0.45. Adding them up gives us 0.54, and dividing correspondingly, 0.09 divided by 0.54 gives us 0.166 and so on, and 0.833 and so on for dividing 0.45 by 0.54.
- And this means that, with the positive test result, our chance increased from 0.1 to about 0.17. Obviously, our chance of not having it decreased accordingly.
<img src="images/Screen Shot 2016-05-02 at 9.08.37 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/487093210923)*
<!--TEASER_END-->
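Here is a short sketch, my own and not from the lecture, that runs both test outcomes of this disease example (prior 0.1, sensitivity 0.9, specificity 0.5) in one place:
```
prior, sens, spec = 0.1, 0.9, 0.5

# negative test result
joint_d, joint_not_d = prior * (1 - sens), (1 - prior) * spec      # 0.01, 0.45
norm = joint_d + joint_not_d                                       # 0.46
print(joint_d / norm, joint_not_d / norm)                          # ~0.0217, ~0.9783

# positive test result
joint_d, joint_not_d = prior * sens, (1 - prior) * (1 - spec)      # 0.09, 0.45
norm = joint_d + joint_not_d                                       # 0.54
print(joint_d / norm, joint_not_d / norm)                          # ~0.1667, ~0.8333
```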
# Bayes Rule Summary
- In Bayes rule, we have a hidden variable we care about, whether we have cancer or not, but we can't measure it directly; instead we have a test. We have a prior for how frequently this variable is true, and the test is generally characterized by how often it says positive when the variable is true and how often it says negative when the variable is false.
- Bayes rule takes the prior and multiplies in the measurement probability, which in this case we assume to be a positive measurement, to give us a new value; it does the same for the actual measurement given the opposite assumption about our hidden cancer variable, and that multiplication gives us this value over here.
- We add those two things up, which gives us the normalizer, and then we divide these values by it to arrive at the best estimate of the hidden variable C given our test result. In this example I used a positive test result, but we might do the same with a negative one.
- This was exactly the same as in our diagram in the beginning. There was a prior for our specific variable to be true. We noticed that inside this prior there is a region for which our test result applies. We also noticed that the test result can occur when the condition is not fulfilled.
- So, this expression over here and this expression over here correspond exactly to the red area over here and the green area over here. But then we noticed that these two areas don't add up to 1. The reason is that there's lots of stuff outside, so we calculated the total area, which was this expression over here, P(Pos).
- And then we normalized these two things over here by the total area to get the relative area assigned to the red thing versus the green thing, this time by just dividing by the total area in this region over here, thereby getting rid of all the other cases.
<img src="images/Screen Shot 2016-05-02 at 9.17.18 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484805560923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 1
Now, I should say, if you got this, you've understood something really significant about statistics and probability. This is totally nontrivial, but it comes in very handy.
- So, I'm going to practice this with you using a second example. In this case, you are a robot.
- This robot lives in a world of exactly two places. There is a red place and a green place, R and G. Now, say initially this robot has no clue where it is, so the prior probability for either place, red or green, is 0.5.
- It also has a sensor, so it can see through its eyes, but this sensor seems to be somewhat unreliable. The probability of seeing red at the red grid cell is 0.8, and the probability of seeing green at the green cell is also 0.8.
- Now, suppose the robot sees red. What are the posterior probabilities that the robot is at the red cell given that it just saw red, and conversely, what's the probability that it's at the green cell even though it saw red? You can apply Bayes Rule and figure that out.
**Answer**
In this example, it gives us funny numbers.
- The posterior for red is 0.8 and the one for green is 0.2, and it's all to do with the fact that in the beginning the robot had no clue where it was.
- The joint for red after seeing red is 0.4; the same for green is 0.1. 0.4 + 0.1 sums to 0.5. If you normalize 0.4 by 0.5, you get 0.8, and if you normalize 0.1 by 0.5, you get 0.2.
**Break down by steps**
```
- P(at R, see R) = P(at R) x P(see R|at R) = 0.5 * 0.8 = 0.4
- P(at G, see R) = P(at G) x P(see R|at G) = 0.5 * 0.2 = 0.1
- P(see R) = P(at R, see R) + P(at G, see R) = 0.5
- P(at R|see R) = P(at R, see R)/P(see R) = 0.4/0.5 = 0.8
- P(at G|see R) = P(at G, see R)/P(see R) = 0.1/0.5 = 0.2
```
<img src="images/Screen Shot 2016-05-02 at 9.33.58 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484806090923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 2
If I now change some parameters--say the robot knows, as a prior, that the probability of being at the red cell is 0 and therefore the probability of being at the green cell is 1--please calculate once again, using Bayes Rule, these posteriors. I have to warn you--this is a bit of a tricky case.
**Answer**
- And the answer is that the prior isn't affected by the measurement in this case: the probability of being at red is 0, and the probability of being at green is 1, despite the fact that the robot saw red.
- To see this, note that the joint of being at red and seeing red is 0 times 0.8, which is 0.
- The same joint for green is 1 times 0.2.
- So you have to normalize 0 and 0.2. The sum of those is 0.2.
- Dividing 0 by 0.2 gives us 0, and 0.2 divided by 0.2 gives us 1.
- These are exactly the numbers over here.
<img src="images/Screen Shot 2016-05-02 at 9.38.06 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484529810923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 3
Let's change this example even further: make this value over here a 0.5 and revert back to a uniform prior. Please go ahead and calculate the posterior probabilities.
**Answer**
- Now the answer is about 0.615 and 0.385.
- These are approximate. Once again, 0.5 times 0.8 is 0.4, and 0.5 times 0.5 is 0.25; add those up to get 0.65.
- Normalizing, 0.4 divided by 0.65 gives approximately 0.615, and 0.25 divided by 0.65 gives approximately 0.385.
**Break down by steps**
```
- P(at R, see R) = P(at R) x P(see R|at R) = 0.5 * 0.8 = 0.4
- P(at G, see R) = P(at G) x P(see R|at G) = 0.5 * 0.5 = 0.25
- P(see R) = P(at R, see R) + P(at G, see R) = 0.65
- P(at R|see R) = P(at R, see R)/P(see R) = 0.4/0.65 = 0.615
- P(at G|see R) = P(at G, see R)/P(see R) = 0.25/0.65 = 0.385
```
<img src="images/Screen Shot 2016-05-02 at 9.41.53 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/483118810923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 4
- I will now make your life really hard. Suppose there are 3 places in the world, not just 2. There are a red one and 2 green ones. And for simplicity, we'll call them A, B, and C.
- Let's assume that all of them have the same prior probability of 1/3, or 0.333 and so on. Let's say the robot sees red, and as before, the probability of seeing red in Cell A is 0.9, the probability of seeing green in Cell B is 0.9, and the probability of seeing green in Cell C is also 0.9.
- So what I've changed is, I've given the hidden variable, kind of like the cancer/non cancer variable, 3 states. There's not just 2 as before, A or B. It's now A, B, or C.
- Let's solve this problem together, because it follows exactly the same recipe as before, even though it might not be obvious.
- So let me ask you, what is the joint of being in Cell A after having seen the red color?
**Answer**
- And just like before, we multiply the prior by this value over here, which gives you 0.3.
- P(A,R) = P(A) * P(R|A) = 0.333 x 0.9 = 0.3
<img src="images/Screen Shot 2016-05-02 at 9.49.08 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484806100923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 5
What's the joint for Cell B?
**Answer**
- Well, the answer is that you multiply our prior of 1/3 with the probability of seeing red in Cell B; since we see green there with 0.9 probability, red is 0.1. So 0.1 times 1/3 gives 0.033.
- P(B,R) = P(B) * P(R|B) = 0.333 x 0.1 = 0.033
<img src="images/Screen Shot 2016-05-02 at 9.52.30 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484529820923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 6
Finally, probability of C and Red. What is that?
**Answer**
- And the answer is exactly the same as this over here, because the prior is the same for B and C, and those probabilities are the same for B and C, so they should be exactly the same.
- P(C,R) = P(C) * P(R|C) = 0.333 x 0.1 = 0.033
<img src="images/Screen Shot 2016-05-02 at 9.54.41 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/483118820923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 7
What is our normalizer?
**Answer**
- And the answer is, you just add those up.
- P(R) = P(A,R) + P(B,R) + P(C,R) = 0.3 + 0.033 + 0.033 = 0.366
<img src="images/Screen Shot 2016-05-02 at 9.57.35 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484806110923)*
<!--TEASER_END-->
## Quiz: Robot Sensing 8
And now we calculate the desired posterior probability for all 3 possible outcomes.
**Answer**
- As usual, we divide this guy over here by the normalizer, which gives us 0.818. Realize all these numbers are a little bit approximate here. Same for this guy, it's approximately 0.091. And this is completely symmetrical, 0.091. And surprise, these guys all add up to 1.
<img src="images/Screen Shot 2016-05-02 at 10.45.50 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/484529830923)*
<!--TEASER_END-->
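For more than two hypotheses the recipe is exactly the same, applied elementwise and then normalized. Below is a hedged sketch (the helper name `bayes_update` is my own, not from the lecture), checked against the three-cell numbers above:
```
def bayes_update(priors, likelihoods):
    """Discrete Bayes update for any number of hypotheses:
    multiply each prior by the likelihood of the observation, then normalize."""
    joints = [p * l for p, l in zip(priors, likelihoods)]
    total = sum(joints)                 # the normalizer, P(observation)
    return [j / total for j in joints]

# three cells A, B, C with uniform prior; the robot sees red
print(bayes_update([1/3, 1/3, 1/3], [0.9, 0.1, 0.1]))   # ~[0.818, 0.091, 0.091]
```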
# Generalizing
So what have you learned?
- In Bayes Rule, there can be more than just 2 underlying causes (not just cancer/non-cancer). There might be 3, 4, or 5, any number. We can apply exactly the same math, but we have to keep track of more values.
- In fact, the robot might also have more than just 2 test outcomes. Here was red or green, but it could be red, green, or blue.
- And this means that our measurement probability will be more elaborate. I have to give you more information, but the math remains exactly the same. We can now deal with very large problems that have many possible hidden causes of where the world might be, and we can still apply Bayes Rule to find all of these numbers.
## Quiz: Sebastian At Home
- This test is actually directly taken from my life and you'll smile when you see my problem. I used to travel a lot. It was so bad for a while. I would find myself in a bed not knowing what country I'm in. I kid you not.
- So let's say I'm gone 60% of my time and at home only 40% of my time. Now, in the summer I live in California, where it essentially never rains (say a 1% chance of rain at home), whereas in many of the countries I have traveled to there's a much higher chance of rain, say 30%.
- So let's now say, I lie in my bed, here I am lying in bed, and I wake up and I open the window and I see it's raining. Let's now apply Bayes rule--What do you think is the probability I'm home now that I see it's raining--just give me this one number.
**Answer**
- And I get 0.0217, which is a really small number.
- The way I get there is by taking the prior for being home times the probability of rain at home, and normalizing it by that same number plus the corresponding term for being gone: the probability of being gone is 0.6, and rain while I'm gone has probability 0.3. That results in 0.0217, or a bit over 2%--did you get this?
- If so, you now understand something that's really interesting. You're able to look at a hidden variable and understand how a test can give you information back about this hidden variable, and that's really cool, because it allows you to apply the same scheme to a great many practical problems in the world--congratulations! In our next unit, which is optional, I'd like you to program all of this, so you can try the same thing in an actual programming interface and write software that implements things such as Bayes Rule.
**Break down by steps**
```
- P(home, rain) = P(home) x P(rain|home) = 0.4 * 0.01 = 0.004
- P(gone, rain) = P(gone) x P(rain|gone) = 0.6 * 0.3 = 0.18
- P(rain) = P(home, rain) + P(gone, rain) = 0.184
- P(home|rain) = P(home, rain)/P(rain) = 0.004/0.184 = 0.0217
- P(gone|rain) = P(gone, rain)/P(rain) = 0.18/0.184 = 0.978
```
<img src="images/Screen Shot 2016-05-02 at 10.57.31 PM.png"/>
*Screenshot taken from [Udacity](https://classroom.udacity.com/courses/st101/lessons/48703346/concepts/482999920923)*
<!--TEASER_END-->
```
import scipy
import scipy.integrate
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
import glob
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
rc('xtick', labelsize=20)
rc('ytick', labelsize=20)
# birky: [0.0920755, -3.05439334, 3.26763974, 7.52312497, -1.1520437]
# fleming: [0.08894253, -3.0280624, 6.56208574, 7.5178048, -1.15758413]
def interp_lxuv(sims, **kwargs):
results = np.genfromtxt(sims)
time = results.T[0]
lbol = results.T[1]
lxuv = results.T[2]
f = interp1d(time, lxuv, kind='cubic')
return time, lxuv, f
def integrate_lxuv(time, lxuv, f):
# int_lxuv = scipy.integrate.quad(f, time[1], max(time))[0]
tarr = np.arange(time[1], max(time)+1, 1e7)
int_lxuv = 0
for i in range(tarr.shape[0]-1):
int_lxuv += scipy.integrate.quad(f, tarr[i], tarr[i+1])[0]
return int_lxuv
def plot_lxuv(time, lxuv, f, **kwargs):
plt.figure(figsize=[8,6])
plt.plot(time, lxuv, color='k', linewidth=3, label='VPLanet')
plt.plot(time, f(time), color='r', linestyle='--', linewidth=3, label='Cubic Interpolation')
plt.xlabel('Time [yr]', fontsize=20)
plt.ylabel(r'$L_{\rm XUV} \, [L_{\odot}]$', fontsize=20)
if 'title' in kwargs:
plt.title(kwargs['title'], fontsize=22)
plt.legend(loc='upper right', fontsize=18)
plt.ylim(10**-6.3, 10**-4)
plt.xscale('log')
plt.yscale('log')
if 'save' in kwargs:
plt.savefig(kwargs['save'])
plt.show()
def lum_to_flux(lum, dist):
"""
input:
lum [Lsun]
dist [cm]
output:
    flux [erg s^-1 cm^-2] for lum given in Lsun
"""
LSUN = 3.826e33 # erg/s
return (lum * LSUN)/(4 * np.pi * dist**2)
def flux_to_energy(flux, radius):
return flux * np.pi * radius**2
dist = np.array([1.726, 2.364, 3.331, 4.376, 5.758, 7.006, 9.259]) * 1e11 # cm
radii = np.array([7.119, 6.995, 5.026, 5.868, 6.664, 7.204, 4.817]) * 1e8 # cm
time_b, lxuv_b, f_b = interp_lxuv("sims_updated/Trappist.star.forward")
lxuv_tot_b = integrate_lxuv(time_b, lxuv_b, f_b)
plot_lxuv(time_b, lxuv_b, f_b, title=r'Updated Model: $L_{\rm XUV, tot}=%.0f$'%(lxuv_tot_b), save='updated_model.png')
fxuv_b = lum_to_flux(lxuv_tot_b, dist)
energy_b = flux_to_energy(fxuv_b, radii)
print('integrated flux for each planet:', fxuv_b)
print('integrated energy for each planet:', energy_b)
time_f, lxuv_f, f_f = interp_lxuv("sims_fleming/Trappist.star.forward")
lxuv_tot_f = integrate_lxuv(time_f, lxuv_f, f_f)
plot_lxuv(time_f, lxuv_f, f_f, title=r'F20 Model: $L_{\rm XUV, tot}=%.0f$'%(lxuv_tot_f), save='fleming_model.png')
fxuv_f = lum_to_flux(lxuv_tot_f, dist)
energy_f = flux_to_energy(fxuv_f, radii)
print('integrated flux for each planet:', fxuv_f)
print('integrated energy for each planet:', energy_f)
lxuv_tot_b/lxuv_tot_f
time_b, lxuv_b, f = interp_lxuv("sims_updated/Trappist.star.forward")
time_f, lxuv_f, f = interp_lxuv("sims_fleming/Trappist.star.forward")
plt.plot(time_b, lxuv_b, label='updated')
plt.plot(time_f, lxuv_f, label='fleming')
plt.xlabel('Time [yr]', fontsize=20)
plt.ylabel(r'$L_{\rm XUV} \, [L_{\odot}]$', fontsize=20)
plt.legend(loc='upper right', fontsize=18)
plt.ylim(10**-7, 10**-4)
plt.xscale('log')
plt.yscale('log')
plt.savefig('comparison.png')
plt.show()
dist = np.array([1.496]) * 1e13
radii = np.array([6.957]) * 1e8
time_s, lxuv_s, f_s = interp_lxuv("sims_earth/sun.sun.forward")
lxuv_tot_s = integrate_lxuv(time_s, lxuv_s, f_s)
plot_lxuv(time_s, lxuv_s, f_s, title=r'F20 Model: $L_{\rm XUV, tot}=%.0f$'%(lxuv_tot_s), save='sun_model.png')
fxuv_s = lum_to_flux(lxuv_tot_s, dist)
energy_s = flux_to_energy(fxuv_s, radii)
print('integrated flux for earth:', fxuv_s)
print('integrated energy for earth:', energy_s)
```
```
import scipy
import statsmodels
import sklearn
import theano
import tensorflow
import keras
import glob
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
import pandas as pd
import math
import matplotlib.pyplot as plt
#read data
df = pd.read_csv('./Data/gill503_2021-02-01-0000_2021-02-08-0000.csv', index_col=False)
print(len(df))
df.columns
#process raw data
df['elevation']=0 #fix here
df['elevdiff']=df['elevation'].diff() #ft
df['elevdiff']=df['elevdiff']*0.000189394 #convert ft to mile
df['distdiff']=df['Analysis - other - Distance driven [mi]'].diff()
df['roadGrade']=df['elevdiff']/df['distdiff']
df['temp']=0 #fix here
df['speed'] = df['distdiff']*3600*1.60934 #convert to km/h
#interpolate if raw data is unfilled
FuelRate = df['Engine - Engine Fuel Rate [gal/h]']
FuelRate = FuelRate.interpolate()
df['FuelRate'] = FuelRate
Speed = df['speed']
Speed = Speed.interpolate()
df['speed'] = Speed
df=df[['speed','FuelRate']]
#get acceleration
speedms = df['speed']*1000/3600
df['acceleration']=speedms.diff() #unit: m/s2
df = df.drop(df[df.FuelRate == 0].index)
df=df.dropna()
#split train and test datasets
train = df.sample(n=math.floor(0.8*df.shape[0]))
test = df.drop(train.index) #hold out the remaining ~20% so train and test do not overlap
#build ann model
Y_train = train['FuelRate'] #unit: gal/h
X_train = train[['speed','acceleration']]
Y_test = test['FuelRate']
X_test = test[['speed','acceleration']]
model = Sequential()
model.add(Dense(5,kernel_initializer='normal', input_dim=2, activation ='relu'))
model.add(Dense(5, kernel_initializer='normal', activation ='relu'))
model.add(Dense(1,kernel_initializer='normal', activation ='linear'))
model.compile(loss='mean_absolute_error', optimizer='adam')
#fit model
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=50, batch_size=256, verbose = 0)
#performance
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
#predict all trips in a for loop
path = r'path/'
all_files = glob.glob(os.path.join(path, "Trajectory*.csv"))
colnames=['time_ms','speed','acceleration','vehicle_ref','actorConfig_id','actorConfig_emissionClass','actorConfig_fuel','actorConfig_ref','actorConfig_vehicleClass']
for f in all_files:
trip=pd.read_csv(f,names=colnames, header=None)
trip['speed']=trip['speed']*(0.01*3.6)
#km/h
trip['acceleration']=trip['acceleration']*(0.001)
#m/s2
input2esti=trip[['speed','acceleration']]
#prediction and plot results
pre = model.predict(input2esti)
tripf=pd.concat([trip,pd.DataFrame(pre,columns=['FuelRateH'])], axis=1)
with open('./Data/hybrid/' + 'hybrid' + f[65:73] +'_'+ f[-12:-4] + '.csv', 'w', newline='') as oFile:
tripf.to_csv(oFile, index = False)
#read trajectory data that needs prediction
trip = pd.read_csv("./Route10A_trip151970020_060600.csv")
trip['speed']=trip['speed']*(0.01*3.6)
#km/h
trip['acceleration']=trip['acceleration']*(0.001)
#m/s2
input2esti=trip[['speed','acceleration']]
#prediction and plot results
pre = model.predict(input2esti)
tripf=pd.concat([trip,pd.DataFrame(pre,columns=['FuelRate'])], axis=1)
fig, ax1 = plt.subplots(figsize=(6, 4))
ax1.plot(tripf.index, tripf.FuelRate, color='blue', linewidth=1)
ax1.set_xticks(tripf.index[::360])
ax1.set_xticklabels(tripf.time[::360], rotation=45)
plt.tight_layout(pad=4)
plt.subplots_adjust(bottom=0.15)
plt.xlabel("Time",fontsize = 14)
plt.ylabel("Fuel consumption rate (gal/h)",fontsize = 14)
plt.show()
```
```
import os
import pandas as pd
using_Google_colab = False
using_Anaconda_on_Mac_or_Linux = True
using_Anaconda_on_windows = False
if using_Google_colab:
from google.colab import drive
drive.mount('/content/drive')
if using_Google_colab:
dir_input = "/content/drive/MyDrive/COVID_Project/input"
if using_Anaconda_on_Mac_or_Linux:
dir_input = "../input"
if using_Anaconda_on_windows:
dir_input = r"..\input"
```
## PD3.1 Open Notebook titled “Reformat_operation - Activity 1”
Read file
```
df_confirmed_cases = pd.read_csv(os.path.join(dir_input, "USA_Facts", "covid_confirmed_usafacts.csv"))
df_confirmed_cases
```
## Activity 2 - Adjust data types for countyFIPS in dataframe to make them strings.
```
df_confirmed_cases = df_confirmed_cases.astype({'countyFIPS': str})
df_confirmed_cases
```
## Activity 3: Upload the google mobility data and change the format for the countyFIPS
```
df_google_mobility_data = pd.read_csv(os.path.join(dir_input,
"Google",
"Region_Mobility_Report_CSVs",
"2020_US_Region_Mobility_Report.csv"))
df_google_mobility_data
```
Select 'sub_region_1','census_fips_code', 'date',
'retail_and_recreation_percent_change_from_baseline',
'grocery_and_pharmacy_percent_change_from_baseline',
'parks_percent_change_from_baseline',
'transit_stations_percent_change_from_baseline',
'workplaces_percent_change_from_baseline',
'residential_percent_change_from_baseline'
```
df_google_mobility_data_selected = df_google_mobility_data[['sub_region_1','census_fips_code', 'date',
'retail_and_recreation_percent_change_from_baseline',
'grocery_and_pharmacy_percent_change_from_baseline',
'parks_percent_change_from_baseline',
'transit_stations_percent_change_from_baseline',
'workplaces_percent_change_from_baseline',
'residential_percent_change_from_baseline']]
df_google_mobility_data_selected
df_google_mobility_data_selected = df_google_mobility_data_selected.rename(columns={"census_fips_code": "countyFIPS"})
df_google_mobility_data_selected
df_google_mobility_data_selected.columns
df_google_mobility_data_selected = df_google_mobility_data_selected.dropna(subset=['countyFIPS'])
```
## PD3.3 Start with the DataFrame you created here. Change countyFIPS to string.
```
# cast to int first to strip the trailing ".0" from the float FIPS codes, then to str
df_google_mobility_data_selected = df_google_mobility_data_selected.astype({'countyFIPS': int})
df_google_mobility_data_selected = df_google_mobility_data_selected.astype({'countyFIPS': str})
df_google_mobility_data_selected
```
# Convolutional variational autoencoder with PyMC3 and Keras
In this document, I will show how autoencoding variational Bayes (AEVB) works in PyMC3's automatic differentiation variational inference (ADVI). The example here is borrowed from the [Keras example](https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder_deconv.py), where a convolutional variational autoencoder is applied to the MNIST dataset. The network architectures of the encoder and decoder are exactly the same. However, PyMC3 allows us to define the probabilistic model, which combines the encoder and decoder, in the same way as other general probabilistic models (e.g., generalized linear models), rather than directly implementing the Monte Carlo sampling and the loss function as done in the Keras example. Thus I think the framework of AEVB in PyMC3 can be extended to more complex models such as [latent Dirichlet allocation](https://taku-y.github.io/notebook/20160928/lda-advi-ae.html).
- Notebook Written by Taku Yoshioka (c) 2016
For using Keras with PyMC3, we need to choose [Theano](http://deeplearning.net/software/theano/) as the backend of Keras.
Install required packages, including pymc3, if it is not already available:
```
#!pip install --upgrade git+https://github.com/Theano/Theano.git#egg=Theano
#!pip install --upgrade keras
#!pip install --upgrade pymc3
#!conda install -y mkl-service
%autosave 0
%matplotlib inline
import sys, os
os.environ['KERAS_BACKEND'] = 'theano'
from theano import config
config.floatX = 'float32'
config.optimizer = 'fast_run'
from collections import OrderedDict
from keras.layers import InputLayer, BatchNormalization, Dense, Convolution2D, Deconvolution2D, Activation, Flatten, Reshape
import numpy as np
import pymc3 as pm
from pymc3.variational import advi_minibatch
from theano import shared, config, function, clone, pp
import theano.tensor as tt
import keras
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from keras import backend as K
K.set_image_dim_ordering('th')
import pymc3, theano
print(pymc3.__version__)
print(theano.__version__)
print(keras.__version__)
```
## Load images
MNIST dataset can be obtained by [scikit-learn API](http://scikit-learn.org/stable/datasets/). The dataset contains images of digits.
```
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
print(mnist.keys())
data = mnist['data'].reshape(-1, 1, 28, 28).astype('float32')
data /= np.max(data)
```
## Use Keras
We define a utility function to get parameters from Keras models. Since we have set the backend to Theano, parameter objects are obtained as shared variables of Theano.
In the code, 'updates' is expected to include the update objects (a dictionary of pairs of shared variables and update equations) for the scaling parameters of batch normalization. While we do not use batch normalization in this example, if we wanted to, we would need to pass these update objects as an argument of `theano.function()` inside the PyMC3 ADVI function. The current version of PyMC3 does not support this, though it would be easy to modify (I want to send a PR in the future).
The learning-phase flag below tells Keras whether we are in the training or test phase. This information also matters for batch normalization.
```
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization
def get_params(model):
"""Get parameters and updates from Keras model
"""
shared_in_updates = list()
params = list()
updates = dict()
for l in model.layers:
attrs = dir(l)
# Updates
if 'updates' in attrs:
updates.update(l.updates)
shared_in_updates += [e[0] for e in l.updates]
# Shared variables
for attr_str in attrs:
attr = getattr(l, attr_str)
if type(attr) is tt.sharedvar.TensorSharedVariable:
if attr is not model.get_input_at(0):
params.append(attr)
return list(set(params) - set(shared_in_updates)), updates
# This code is required when using BatchNormalization layer
keras.backend.theano_backend._LEARNING_PHASE = \
shared(np.uint8(1), name='keras_learning_phase')
```
## Encoder and decoder
First, we define the convolutional neural network for encoder using Keras API. This function returns a CNN model given the shared variable representing observations (images of digits), the dimension of latent space, and the parameters of the model architecture.
```
def cnn_enc(xs, latent_dim, nb_filters=64, nb_conv=3, intermediate_dim=128):
"""Returns a CNN model of Keras.
Parameters
----------
xs : theano.tensor.sharedvar.TensorSharedVariable
Input tensor.
latent_dim : int
Dimension of latent vector.
"""
input_layer = InputLayer(input_tensor=xs,
batch_input_shape=xs.get_value().shape)
model = Sequential()
model.add(input_layer)
cp1 = {'border_mode': 'same', 'activation': 'relu'}
cp2 = {'border_mode': 'same', 'activation': 'relu', 'subsample': (2, 2)}
cp3 = {'border_mode': 'same', 'activation': 'relu', 'subsample': (1, 1)}
cp4 = cp3
model.add(Convolution2D(1, 2, 2, **cp1))
model.add(Convolution2D(nb_filters, 2, 2, **cp2))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, **cp3))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, **cp4))
model.add(Flatten())
model.add(Dense(intermediate_dim, activation='relu'))
model.add(Dense(2 * latent_dim))
return model
```
Then we define a utility class for encoders. This class does not depend on the architecture of the encoder except for input shape (`tensor4` for images), so we can use this class for various encoding networks.
```
class Encoder:
"""Encode observed images to variational parameters (mean/std of Gaussian).
Parameters
----------
xs : theano.tensor.sharedvar.TensorSharedVariable
Placeholder of input images.
dim_hidden : int
The number of hidden variables.
net : Function
Returns
"""
def __init__(self, xs, dim_hidden, net):
model = net(xs, dim_hidden)
self.model = model
self.xs = xs
self.out = model.get_output_at(-1)
self.means = self.out[:, :dim_hidden]
self.lstds = self.out[:, dim_hidden:]
self.params, self.updates = get_params(model)
self.enc_func = None
self.dim_hidden = dim_hidden
def _get_enc_func(self):
if self.enc_func is None:
xs = tt.tensor4()
means = clone(self.means, {self.xs: xs})
lstds = clone(self.lstds, {self.xs: xs})
self.enc_func = function([xs], [means, lstds])
return self.enc_func
def encode(self, xs):
# Used in test phase
keras.backend.theano_backend._LEARNING_PHASE.set_value(np.uint8(0))
enc_func = self._get_enc_func()
means, _ = enc_func(xs)
return means
def draw_samples(self, xs, n_samples=1):
"""Draw samples of hidden variables based on variational parameters encoded.
Parameters
----------
xs : numpy.ndarray, shape=(n_images, 1, height, width)
Images.
"""
# Used in test phase
keras.backend.theano_backend._LEARNING_PHASE.set_value(np.uint8(0))
enc_func = self._get_enc_func()
means, lstds = enc_func(xs)
means = np.repeat(means, n_samples, axis=0)
lstds = np.repeat(lstds, n_samples, axis=0)
ns = np.random.randn(len(xs) * n_samples, self.dim_hidden)
zs = means + np.exp(lstds) * ns
        return zs
```
In a similar way, we define the decoding network and a utility class for decoders.
```
def cnn_dec(zs, nb_filters=64, nb_conv=3, output_shape=(1, 28, 28)):
"""Returns a CNN model of Keras.
Parameters
----------
zs : theano.tensor.var.TensorVariable
Input tensor.
"""
minibatch_size, dim_hidden = zs.tag.test_value.shape
input_layer = InputLayer(input_tensor=zs,
batch_input_shape=zs.tag.test_value.shape)
model = Sequential()
model.add(input_layer)
model.add(Dense(dim_hidden, activation='relu'))
model.add(Dense(nb_filters * 14 * 14, activation='relu'))
cp1 = {'border_mode': 'same', 'activation': 'relu', 'subsample': (1, 1)}
cp2 = cp1
cp3 = {'border_mode': 'valid', 'activation': 'relu', 'subsample': (2, 2)}
cp4 = {'border_mode': 'valid', 'activation': 'sigmoid'}
output_shape_ = (minibatch_size, nb_filters, 14, 14)
model.add(Reshape(output_shape_[1:]))
model.add(Deconvolution2D(nb_filters, nb_conv, nb_conv, output_shape_, **cp1))
model.add(Deconvolution2D(nb_filters, nb_conv, nb_conv, output_shape_, **cp2))
output_shape_ = (minibatch_size, nb_filters, 29, 29)
model.add(Deconvolution2D(nb_filters, 2, 2, output_shape_, **cp3))
model.add(Convolution2D(1, 2, 2, **cp4))
return model
class Decoder:
"""Decode hidden variables to images.
Parameters
----------
zs : Theano tensor
Hidden variables.
"""
def __init__(self, zs, net):
model = net(zs)
self.model = model
self.zs = zs
self.out = model.get_output_at(-1)
self.params, self.updates = get_params(model)
self.dec_func = None
def _get_dec_func(self):
if self.dec_func is None:
zs = tt.matrix()
xs = clone(self.out, {self.zs: zs})
self.dec_func = function([zs], xs)
return self.dec_func
def decode(self, zs):
"""Decode hidden variables to images.
An image consists of the mean parameters of the observation noise.
Parameters
----------
zs : numpy.ndarray, shape=(n_samples, dim_hidden)
Hidden variables.
"""
# Used in test phase
keras.backend.theano_backend._LEARNING_PHASE.set_value(np.uint8(0))
return self._get_dec_func()(zs)
```
## Generative model
We can construct the generative model with PyMC3 API and the functions and classes defined above. We set the size of mini-batches to 100 and the dimension of the latent space to 2 for visualization.
```
# Constants
minibatch_size = 200
dim_hidden = 2
```
A placeholder of images is required to which mini-batches of images will be placed in the ADVI inference. It is also the input to the encoder. In the below, `enc.model` is a Keras model of the encoder network, thus we can check the model architecture using the method `summary()`.
```
# Placeholder of images
xs_t = shared(np.zeros((minibatch_size, 1, 28, 28)).astype('float32'), name='xs_t')
# Encoder
enc = Encoder(xs_t, dim_hidden, net=cnn_enc)
enc.model.summary()
```
The probabilistic model involves only two random variables; latent variable $\mathbf{z}$ and observation $\mathbf{x}$. We put a Normal prior on $\mathbf{z}$, decode the variational parameters of $q(\mathbf{z}|\mathbf{x})$ and define the likelihood of the observation $\mathbf{x}$.
```
with pm.Model() as model:
# Hidden variables
zs = pm.Normal('zs', mu=0, sd=1, shape=(minibatch_size, dim_hidden), dtype='float32')
# Decoder and its parameters
dec = Decoder(zs, net=cnn_dec)
# Observation model
xs_ = pm.Normal('xs_', mu=dec.out.ravel(), sd=0.1, observed=xs_t.ravel(), dtype='float32')
```
In the above definition of the generative model, we do not know how the decoded variational parameters are passed to $q(\mathbf{z}|\mathbf{x})$. To do this, we will set the argument `local_RVs` in the ADVI function of PyMC3.
```
local_RVs = OrderedDict({zs: ((enc.means, enc.lstds), len(data) / float(minibatch_size))})
```
This argument is a `OrderedDict` whose keys are random variables to which the decoded variational parameters are set, `zs` in this model. Each value of the dictionary contains two theano expressions representing variational mean (`enc.means`) and log of standard deviations (`enc.lstds`). In addition, a scaling constant (`len(data) / float(minibatch_size)`) is required to compensate for the size of mini-batches of the corresponding log probability terms in the evidence lower bound (ELBO), the objective of the variational inference.
The scaling constant for the observed random variables is set in the same way.
```
observed_RVs = OrderedDict({xs_: len(data) / float(minibatch_size)})
```
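As a numerical aside (not part of the model definition), the scaling constant simply rescales the minibatch log-probability terms so that their expectation matches the corresponding full-data terms in the ELBO:
```
# With N observations and minibatches of size B, the sum of log-probabilities
# over a minibatch times N/B is an unbiased estimate of the full-data sum.
N = len(data)        # 70000 for MNIST
B = minibatch_size   # 200 in this notebook
print(N / float(B))  # the constant used in local_RVs and observed_RVs: 350.0
```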
We can also check the architecture of the decoding network as for the encoding network.
```
dec.model.summary()
```
## Inference
To perform inference, we need to create generators of mini-batches and define the optimizer used for ADVI. The optimizer is a function that returns Theano parameter update object (dictionary).
```
# Mini-batches
def create_minibatch(data, minibatch_size):
rng = np.random.RandomState(0)
start_idx = 0
while True:
# Return random data samples of set size batchsize each iteration
ixs = rng.randint(data.shape[0], size=minibatch_size)
yield data[ixs]
minibatches = zip(create_minibatch(data, minibatch_size))
def rmsprop(loss, param):
    # any Keras optimizer could be used here; we use RMSprop
    opt = keras.optimizers.RMSprop()
    return opt.get_updates(param, [], loss)
```
Let us execute ADVI function of PyMC3.
```
with model:
v_params = pm.variational.advi_minibatch(
n=1000, minibatch_tensors=[xs_t], minibatches=minibatches,
local_RVs=local_RVs, observed_RVs=observed_RVs,
encoder_params=(enc.params + dec.params),
optimizer=rmsprop
)
```
## Results
`v_params`, the returned value of the ADVI function, has the trace of ELBO during inference (optimization). We can see the convergence of the inference.
```
plt.plot(v_params.elbo_vals)
```
Finally, we see the distribution of the images in the latent space. To do this, we make 2-dimensional points in a grid and feed them into the decoding network. The mean of $p(\mathbf{x}|\mathbf{z})$ is the image corresponding to the samples on the grid.
```
zs = np.array([(z1, z2)
for z1 in np.arange(-2, 2, 0.2)
for z2 in np.arange(-2, 2, 0.2)]).astype('float32')
xs = dec.decode(zs)[:, 0, :, :]
xs = np.bmat([[xs[i + j * 20] for i in range(20)] for j in range(20)])
matplotlib.rc('axes', **{'grid': False})
plt.figure(figsize=(10, 10))
plt.imshow(xs, interpolation='none', cmap='gray')
```
# Aragon Conviction Voting Model - Version 1
## Model Overview
[Conviction Voting](https://medium.com/giveth/conviction-voting-a-novel-continuous-decision-making-alternative-to-governance-aa746cfb9475) is a novel decision-making process where voters express their preferences for which proposals they would like to see approved in a continuous rather than discrete way. The longer the community keeps a preference on an individual proposal, the “stronger” the proposal conviction becomes. In the conviction voting model, a graph structure is used to record the introduction and removal of participants, candidates, proposals, and their outcomes.
## cadCAD Overview
In the cadCAD simulation [methodology](https://community.cadcad.org/t/differential-specification-syntax-key/31), we operate on four layers: **Policies, Mechanisms, States**, and **Metrics**. Information flows do not have explicit feedback loops unless noted. **Policies** determine the inputs into the system dynamics, and can come from user input, observations from the exogenous environment, or algorithms. **Mechanisms** are functions that take the policy decisions and update the States to reflect the policy level changes. **States** are variables that represent the system quantities at the given point in time, and **Metrics** are computed from state variables to assess the health of the system. Metrics can often be thought of as KPIs, or Key Performance Indicators.
At a more granular level, to setup a model, there are system conventions and configurations that must be [followed.](https://community.cadcad.org/t/introduction-to-simulation-configurations/34)
The way to think of cadCAD modeling is analogous to machine learning pipelines, which normally consist of multiple steps when training and running a deployed model. There is preprocessing, which includes segregating features between continuous and categorical, transforming or imputing data, and then instantiating, training, and running a machine learning model with specified hyperparameters. cadCAD modeling can be thought of in the same way: states, roughly translating into features, are fed into pipelines that have built-in logic to direct traffic between different mechanisms, such as scaling and imputation. Accuracy scores, ROC, etc. are analogous to the metrics that can be configured on a cadCAD model, specifying how well a given model is doing in meeting its objectives. The parameter sweeping capability of cadCAD can be thought of as a grid search, or a way to find the optimal hyperparameters for a system by running through alternative scenarios. A/B style testing that cadCAD enables is used in the same way machine learning models are A/B tested, except out of the box, in providing a side-by-side comparison of multiple different models to compare and contrast performance. Utilizing the field of Systems Identification, dynamical systems models can be used to "online learn" by providing a feedback loop to generative system mechanisms.
## Differential Specification

## Schema of the states
The model consists of a temporal in-memory graph database called *network*, containing nodes of type **Participant** and type **Proposal**. Participants have *holdings*, and Proposals have *funds_required*, *status* (candidate or active), and *conviction*. Edges in the network go from nodes of type Participant to nodes of type Proposal, with each edge having the key *type*, all of which are set to *support*. Edges from participant $i$ to proposal $j$ have the following additional characteristics:
* Each pairing (i,j) will have *affinity*, which determines how much $i$ likes or dislikes proposal $j$.
* Each participant $i$ assigns its *tokens* over the edges $(i,j)$ such that the sum over all $j$ satisfies ```Sum_j network.edges[(i,j)]['tokens'] = network.nodes[i]['holdings']```
* Each pairing (i,j) will have *conviction* local to that edge, whose update at each timestep is computed from the value of *tokens* at that edge (a minimal sketch of this update and the trigger check appears at the end of this section).
* Each proposal *j* will have a *conviction* which is equal to the sum of the conviction on its inbound edges: ```network.nodes[j]['conviction'] = Sum_i network.edges[(i,j)]['conviction']```
* The "trigger function" will check whether each proposal $j$ has met the criteria for passing; if a proposal passes, its *status* changes from *candidate* to *active*, and an amount of funds equal to its *funds_required* is decremented from *funds*.
The other state variable in the model is *funds*, which is a numpy floating point.
The system consists of 100 time steps without a parameter sweep or monte carlo.
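To make the per-edge conviction update and the trigger check concrete, here is a minimal sketch. The geometric-decay form of the update and the exact shape of the threshold are illustrative assumptions (the real implementations live in `model/model/proposals.py` and the conviction helper functions), not the repository's exact formulas.
```
ALPHA = 0.9  # assumed per-timestep decay parameter; the model's actual value may differ

def update_edge_conviction(prev_conviction, tokens, alpha=ALPHA):
    # assumed geometric-decay accumulation: staked tokens add conviction,
    # while previously accumulated conviction decays by alpha each timestep
    return alpha * prev_conviction + tokens

def passes_trigger(conviction, funds_requested, funds, supply, rho=0.025, beta=0.2):
    # assumed conviction-voting style threshold: proposals asking for a larger
    # share of available funds need disproportionately more conviction
    share = funds_requested / funds
    if share >= beta:
        return False  # requests at or above beta of the funds can never pass
    threshold = rho * supply / (beta - share) ** 2
    return conviction > threshold
```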
## Partial State Update Blocks
Each partial state update block is kind of like a phase in a phase-based board game: everyone decides what to do, and the block reconciles all decisions. One timestep is a full turn, with each block being a phase of that turn. We will walk through the individual partial state update blocks one by one below.
```
{
    # system.py:
    'policies': {
        'random': driving_process
    },
    'variables': {
        'network': update_network,
        'funds': increment_funds,
    }
},
```
To simulate the arrival of participants and proposals into the system, we have a driving process that represents the arrival of individual agents. For simplicity, we use hyperparameters for supply and sentiment, 1,231,286.81 and 0.6 respectively. We use a random uniform draw over [0, 1) to decide whether new participants arrive. We then use an exponential distribution to draw the new participant's tokens, with a loc of 0.0 and a scale equal to the expected holdings, calculated as 0.1 * supply / number of existing participants. We calculate the number of new proposals by
```
proposal_rate = 1/median_affinity * (1+total_funds_requested/funds)
rv2 = np.random.rand()
new_proposal = bool(rv2<1/proposal_rate)
```
The network state variable is updated to include the new participants and proposals, while the funds state variable is updated for the increase in system funds.
[To see the partial state update code, click here](model/model/system.py)
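As a rough, self-contained illustration of the arrival logic described above (the function name and the fixed arrival rate are illustrative, not the repository's API):
```
import numpy as np
from scipy.stats import expon

def sketch_driving_process(n_participants, supply, arrival_rate=0.3):
    # a uniform draw decides whether a new participant arrives this timestep
    new_participant = np.random.rand() < arrival_rate
    # new holdings are drawn from an exponential with scale 0.1 * supply / n_participants
    expected_holdings = 0.1 * supply / n_participants
    holdings = expon.rvs(loc=0.0, scale=expected_holdings) if new_participant else 0.0
    return new_participant, holdings

print(sketch_driving_process(n_participants=60, supply=1231286.81))
```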
```
{
# participants.py
'policies': {
'completion': check_progress
},
'variables': {
'network': complete_proposal
}
},
```
In the next phase of the turn, [to see the logic code, click here](model/model/participants.py), the *check_progress* behavior checks for the completion of previously funded proposals. The code calculates the completion and failure rates as follows:
```
likelihood = 1.0/(base_completion_rate+np.log(grant_size))
failure_rate = 1.0/(base_failure_rate+np.log(grant_size))
if np.random.rand() < likelihood:
completed.append(j)
elif np.random.rand() < failure_rate:
failed.append(j)
```
With the base_completion_rate being 100 and the base_failure_rate as 200.
The mechanism then updates the respective *network* nodes.
```
{
# proposals.py
'policies': {
'release': trigger_function
},
'variables': {
'funds': decrement_funds,
'network': update_proposals
}
},
```
The [trigger release function](model/model/proposals.py) checks to see if each proposal passes or not. If a proposal passes, funds are decremented by the amount of the proposal, while the proposal's status is changed in the network object.
```
{
# participants.py
'policies': {
'participants_act': participants_decisions
},
'variables': {
'network': update_tokens
}
}
```
The participants decide, based on their affinity, which proposals they would like to support ([to see the logic code, click here](model/model/participants.py)). Proposals that participants have high affinity for receive more support and pledged tokens than proposals with lower affinity and sentiment. We then update everyone's holdings and their conviction for each proposal. A minimal sketch of this allocation follows.
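The sketch below assumes tokens are simply split in proportion to affinity; the actual decision logic in `participants.py` also factors in sentiment and participation decisions.
```
def sketch_allocate_tokens(network, participant, candidate_proposals):
    # split the participant's holdings across candidate proposals in proportion to affinity
    affinities = {j: network.edges[(participant, j)]['affinity'] for j in candidate_proposals}
    total_affinity = sum(affinities.values())
    holdings = network.nodes[participant]['holdings']
    for j, affinity in affinities.items():
        share = affinity / total_affinity if total_affinity > 0 else 0.0
        network.edges[(participant, j)]['tokens'] = holdings * share
```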
## Model next steps
The model described above is a minimalist, first-iteration model that covers the core mechanisms of the Aragon Conviction Voting model. Below are additional dynamics we can add to enrich the model and provide workstreams for subsequent iterations of this lab notebook.
* Sentiment
* Mixing of token holdings among participants
* Departure of participants
* Participants influencing each other's opinions
* Proposals which are good or no good together
* Multiple proposal stages such as killed, failed and completed
* Effects of outcomes on sentiment
# Simulation
## Configuration
The configuration is factored out into its own module (`economyconfig`); here we review the config object and its partial state update blocks, with a slightly deeper dive on the trigger function.
```
from model import economyconfig
# pull out configurations to illustrate
sim_config,genesis_states,seeds,partial_state_update_blocks = economyconfig.get_configs()
sim_config
partial_state_update_blocks
```
## Initialization
To create the genesis_states, we create our in-memory graph database within networkx.
```
# import libraries
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.stats import expon, gamma
from model.model.conviction_helper_functions import *
# Parameters
n= 60 #initial participants
m= 3 #initial proposals
initial_sentiment = .6
initial_funds = 40781.42
def initialize_network(n, m, initial_funds, expected_supply = 10**6):
'''
Definition:
Function to initialize network x object
'''
# initilize network x graph
network = nx.DiGraph()
# create participant nodes with type and token holding
for i in range(n):
network.add_node(i)
network.nodes[i]['type']= "participant"
h_rv = expon.rvs(loc=0.0, scale= expected_supply/n)
network.nodes[i]['holdings'] = h_rv
participants = get_nodes_by_type(network, 'participant')
initial_supply = np.sum([ network.nodes[i]['holdings'] for i in participants])
# Generate initial proposals
for ind in range(m):
j = n+ind
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction'] = 0
network.nodes[j]['status'] = 'candidate'
network.nodes[j]['age'] = 0
r_rv = gamma.rvs(3,loc=0.001, scale=10000)
network.nodes[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_threshold(r_rv, initial_funds, initial_supply)
for i in range(n):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i, j)]['tokens'] = 0
network.edges[(i, j)]['conviction'] = 0
network.edges[(i, j)]['type'] = 'support'
proposals = get_nodes_by_type(network, 'proposal')
total_requested = np.sum([ network.nodes[i]['funds_requested'] for i in proposals])
return network, initial_funds
# run the initialize_network function to create the initial states of the simulation
network, initial_funds = initialize_network(n,m,initial_funds)
# Create initial states
genesis_states = {
'network':network,
'funds':initial_funds
}
genesis_states
# To explore our model prior to the simulation, we extract key components from our networkX object into lists.
proposals = get_nodes_by_type(network, 'proposal')
participants = get_nodes_by_type(network, 'participant')
supporters = get_edges_by_type(network, 'support')
```
#### Exploring the State Data Structure
A graph here is a temporal data structure: it evolves over time. A graph $\mathcal{G}(\mathcal{V},\mathcal{E})$ consists of a set of vertices or nodes $\mathcal{V} = \{1, \dots, N\}$ connected by edges $\mathcal{E} \subseteq \mathcal{V} \times \mathcal{V}$.
See *Schema of the states* above for more details
Let's explore!
```
#sample a participant
network.nodes[participants[0]]
# Let's look at the distribution of participant holdings at the start of the sim
plt.hist([ network.nodes[i]['holdings'] for i in participants])
plt.title('Histogram of Participants Token Holdings')
#lets look at proposals
network.nodes[proposals[0]]
```
Proposals initially start without any conviction and with the status of candidate. If a proposal's conviction exceeds its trigger, the proposal moves to active and its requested funds are granted.
All initial proposals start with 0 conviction and status 'candidate', so we can simply examine the amounts of funds requested.
```
plt.bar( proposals, [ network.nodes[i]['funds_requested'] for i in proposals])
plt.title('Histogram of Proposals Funds Requested')
plt.xlabel('Proposals')
```
Conviction is a concept that arises on the edges between participants and proposals. In the initial conditions there are no votes yet, so we will look at conviction later; however, the voting choices are driven by underlying affinities, which we can see now.
```
affinities = np.empty((n,m))
for i_ind in range(n):
for j_ind in range(m):
i = participants[i_ind]
j = proposals[j_ind]
affinities[i_ind][j_ind] = network.edges[(i,j)]['affinity']
dims = (20, 5)
fig, ax = plt.subplots(figsize=dims)
sns.heatmap(affinities.T,
xticklabels=participants,
yticklabels=proposals,
square=True,
cbar=True,
ax=ax)
plt.title('affinities between participants and proposals')
plt.ylabel('proposal_id')
plt.xlabel('participant_id')
```
## Run simulation
Now we will create the final system configuration, append the genesis states we created, and run our simulation.
```
from cadCAD.configuration import append_configs
# Create configuration
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
partial_state_update_blocks=partial_state_update_blocks
)
import numpy as np
import pandas as pd
from model.model.conviction_helper_functions import *
from model import run
from cadCAD import configs
pd.options.display.float_format = '{:.2f}'.format
%matplotlib inline
# Pass in configuration to run
df = run.run(configs)
```
After the simulation has run successfully, we perform some postprocessing to extract node and edge values from the network object and add as columns to the pandas dataframe. For the rdf, we take only the values at the last substep of each timestep in the simulation.
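For reference, the "last substep of each timestep" selection is typically just a filter on the raw cadCAD output (a sketch; `run.postprocessing` also handles the extraction of node and edge values into columns):
```
# keep only rows from the final substep of every timestep
rdf_sketch = df[df.substep == df.substep.max()].reset_index(drop=True)
```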
```
df,rdf = run.postprocessing(df)
affinities_plot(df)
rdf.plot(x='timestep',y=['candidate_count','active_count','completed_count', 'killed_count', 'failed_count'])
plt.title('Proposal Status')
plt.ylabel('count of proposals')
plt.legend(ncol = 3,loc='upper center', bbox_to_anchor=(0.5, -0.15))
rdf.plot(x='timestep',y=['candidate_funds','active_funds','completed_funds', 'killed_funds', 'failed_funds'])
plt.title('Proposal Status weighted by funds requested')
plt.ylabel('Funds worth of proposals')
plt.legend(ncol = 3,loc='upper center', bbox_to_anchor=(0.5, -0.15))
nets = rdf.network.values
K = 3
snap_plot(nets[K:K+1], size_scale = 1/300)
K = 56
snap_plot(nets[K:K+1], size_scale = 1/300)
quantile_plot('timestep','conviction_share_of_trigger', rdf, .25)
plt.hlines(1,0,df.timestep.values[-1], linestyle='--')
```
## Conclusion
We have created a simplified conviction voting model that illustrates the state objects, and provides descriptions of how the model fits together. In subsequent notebooks, we will expand the model to introduce additional complexity to more fit real world implementations.
<a href="https://colab.research.google.com/github/holgerfach/dw_matrix_car/blob/master/day_04.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install --upgrade tables
!pip install eli5
!pip install xgboost
import pandas as pd
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
import eli5
from eli5.sklearn import PermutationImportance
cd "/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car"
df = pd.read_hdf('data/car.h5')
df.shape
```
##Feature Engineering
```
SUFFIX_CAT = '__cat'
for feat in df.columns:
if isinstance(df[feat][0], list): continue
factorized_values = df[feat].factorize()[0]
if SUFFIX_CAT in feat:
df[feat] = factorized_values
else:
df[feat + SUFFIX_CAT] = factorized_values
cat_feats = [x for x in df.columns if SUFFIX_CAT in x ]
cat_feats = [x for x in cat_feats if 'price' not in x ]
len(cat_feats)
def run_model(model, feats):
X = df[feats].values
y = df['price_value'].values
scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
return np.mean(scores), np.std(scores)
```
##DecisionTree
```
# `feats` is only defined further down; at this point all categorical features are used
run_model( DecisionTreeRegressor(max_depth=5), cat_feats )
run_model( DecisionTreeRegressor(max_depth=5), cat_feats )
```
##Random Forest
```
model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model(model, cat_feats)
```
##XGBoost
```
xgb_params = {
'max_depth': 5,
'n_estimators': 50,
'learning_rate': 0.1,
'seed': 0
}
run_model(xgb.XGBRegressor(**xgb_params), cat_feats)
# fit on all categorical features so permutation importance can rank them
X = df[cat_feats].values
y = df['price_value'].values
m = xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0)
m.fit(X, y)
imp = PermutationImportance(m, random_state=0).fit(X, y)
eli5.show_weights(imp, feature_names=cat_feats)
feats = [
'param_napęd__cat',
'param_rok-produkcji__cat',
'param_stan__cat',
'param_skrzynia-biegów__cat',
'param_faktura-vat__cat',
'param_moc__cat',
'param_marka-pojazdu__cat',
'feature_kamera-cofania__cat',
'param_typ__cat',
'param_pojemność-skokowa__cat',
'seller_name__cat',
'feature_wspomaganie-kierownicy__cat',
'param_model-pojazdu__cat',
'param_wersja__cat',
'param_kod-silnika__cat',
'feature_system-start-stop__cat',
'feature_asystent-pasa-ruchu__cat',
'feature_czujniki-parkowania-przednie__cat',
'feature_łopatki-zmiany-biegów__cat',
'feature_regulowane-zawieszenie__cat'
]
run_model(xgb.XGBRegressor(**xgb_params), feats)
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
feats = [
'param_napęd__cat',
'param_rok-produkcji',
'param_stan__cat',
'param_skrzynia-biegów__cat',
'param_faktura-vat__cat',
'param_moc__cat',
'param_marka-pojazdu__cat',
'feature_kamera-cofania__cat',
'param_typ__cat',
'param_pojemność-skokowa__cat',
'seller_name__cat',
'feature_wspomaganie-kierownicy__cat',
'param_model-pojazdu__cat',
'param_wersja__cat',
'param_kod-silnika__cat',
'feature_system-start-stop__cat',
'feature_asystent-pasa-ruchu__cat',
'feature_czujniki-parkowania-przednie__cat',
'feature_łopatki-zmiany-biegów__cat',
'feature_regulowane-zawieszenie__cat'
]
run_model(xgb.XGBRegressor(**xgb_params), feats)
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]) )
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
feats = [
'param_napęd__cat',
'param_rok-produkcji',
'param_stan__cat',
'param_skrzynia-biegów__cat',
'param_faktura-vat__cat',
'param_moc',
'param_marka-pojazdu__cat',
'feature_kamera-cofania__cat',
'param_typ__cat',
'param_pojemność-skokowa__cat',
'seller_name__cat',
'feature_wspomaganie-kierownicy__cat',
'param_model-pojazdu__cat',
'param_wersja__cat',
'param_kod-silnika__cat',
'feature_system-start-stop__cat',
'feature_asystent-pasa-ruchu__cat',
'feature_czujniki-parkowania-przednie__cat',
'feature_łopatki-zmiany-biegów__cat',
'feature_regulowane-zawieszenie__cat'
]
run_model(xgb.XGBRegressor(**xgb_params), feats)
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int( str(x).split('cm')[0].replace(' ', '')) )
feats = [
'param_napęd__cat',
'param_rok-produkcji',
'param_stan__cat',
'param_skrzynia-biegów__cat',
'param_faktura-vat__cat',
'param_moc',
'param_marka-pojazdu__cat',
'feature_kamera-cofania__cat',
'param_typ__cat',
'param_pojemność-skokowa',
'seller_name__cat',
'feature_wspomaganie-kierownicy__cat',
'param_model-pojazdu__cat',
'param_wersja__cat',
'param_kod-silnika__cat',
'feature_system-start-stop__cat',
'feature_asystent-pasa-ruchu__cat',
'feature_czujniki-parkowania-przednie__cat',
'feature_łopatki-zmiany-biegów__cat',
'feature_regulowane-zawieszenie__cat'
]
run_model(xgb.XGBRegressor(**xgb_params), feats)
df['param_wersja'].unique()
```
## Logistic Regression
```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split,KFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix,accuracy_score,precision_score,\
recall_score,roc_curve,auc
import expectation_reflection as ER
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from function import split_train_test,make_data_balance
np.random.seed(1)
```
First of all, the processed data are imported.
```
#data_list = ['1paradox','2peptide','3stigma']
#data_list = np.loadtxt('data_list.txt',dtype='str')
data_list = np.loadtxt('data_list_30sets.txt',dtype='str')
#data_list = ['9coag']
print(data_list)
def read_data(data_id):
data_name = data_list[data_id]
print('data_name:',data_name)
Xy = np.loadtxt('../classification_data/%s/data_processed_knn9.dat'%data_name)
X = Xy[:,:-1]
y = Xy[:,-1]
#print(np.unique(y,return_counts=True))
X,y = make_data_balance(X,y)
print(np.unique(y,return_counts=True))
X, y = shuffle(X, y, random_state=1)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.5,random_state = 1)
sc = MinMaxScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
return X_train,X_test,y_train,y_test
def measure_performance(X_train,X_test,y_train,y_test):
#model = LogisticRegression(max_iter=100)
model = SGDClassifier(loss='log',max_iter=1000,tol=0.001) # 'log' for logistic regression, 'hinge' for SVM
# regularization penalty space
#penalty = ['l1','l2']
penalty = ['elasticnet']
# solver
#solver=['saga']
#solver=['liblinear']
# regularization hyperparameter space
#C = np.logspace(0, 4, 10)
#C = [0.001,0.1,1.0,10.0,100.0]
alpha = [0.001,0.01,0.1,1.0,10.,100.]
# l1_ratio
#l1_ratio = [0.1,0.5,0.9]
l1_ratio = [0.,0.2,0.4,0.6,0.8,1.0]
# Create hyperparameter options
#hyperparameters = dict(penalty=penalty,solver=solver,C=C,l1_ratio=l1_ratio)
#hyper_parameters = dict(penalty=penalty,solver=solver,C=C)
hyper_parameters = dict(penalty=penalty,alpha=alpha,l1_ratio=l1_ratio)
# Create grid search using cross validation
clf = GridSearchCV(model, hyper_parameters, cv=4, iid='deprecated')
# Fit grid search
best_model = clf.fit(X_train, y_train)
# View best hyperparameters
#print('Best Penalty:', best_model.best_estimator_.get_params()['penalty'])
#print('Best C:', best_model.best_estimator_.get_params()['C'])
#print('Best alpha:', best_model.best_estimator_.get_params()['alpha'])
#print('Best l1_ratio:', best_model.best_estimator_.get_params()['l1_ratio'])
# best hyper parameters
print('best_hyper_parameters:',best_model.best_params_)
# performance:
y_test_pred = best_model.best_estimator_.predict(X_test)
acc = accuracy_score(y_test,y_test_pred)
#print('Accuracy:', acc)
p_test_pred = best_model.best_estimator_.predict_proba(X_test) # prob of [0,1]
p_test_pred = p_test_pred[:,1] # prob of 1
fp,tp,thresholds = roc_curve(y_test, p_test_pred, drop_intermediate=False)
roc_auc = auc(fp,tp)
#print('AUC:', roc_auc)
precision = precision_score(y_test,y_test_pred)
#print('Precision:',precision)
recall = recall_score(y_test,y_test_pred)
#print('Recall:',recall)
f1_score = 2*precision*recall/(precision+recall)
return acc,roc_auc,precision,recall,f1_score
n_data = len(data_list)
roc_auc = np.zeros(n_data) ; acc = np.zeros(n_data)
precision = np.zeros(n_data) ; recall = np.zeros(n_data)
f1_score = np.zeros(n_data)
#data_id = 0
for data_id in range(n_data):
X_train,X_test,y_train,y_test = read_data(data_id)
acc[data_id],roc_auc[data_id],precision[data_id],recall[data_id],f1_score[data_id] =\
measure_performance(X_train,X_test,y_train,y_test)
print(data_id,acc[data_id],roc_auc[data_id],precision[data_id],recall[data_id],f1_score[data_id])
print('acc_mean:',acc.mean())
print('roc_mean:',roc_auc.mean())
print('precision:',precision.mean())
print('recall:',recall.mean())
print('f1_score:',f1_score.mean())
np.savetxt('result_knn9_LR.dat',(roc_auc,acc,precision,recall,f1_score),fmt='%f')
```
# Assignment 2
## Q1
$$
\begin{array}{rl}
Z =X / Y \\
\nabla X_{i j}=\frac{\partial l}{\partial X_{i j}} & =\sum_{a b} \frac{\partial l}{\partial Z_{a b}} \frac{\partial Z_{a b}}{\partial X_{i j}} \\
& =\sum_{a b} \nabla Z_{a b} \frac{\partial[X / Y]_{a b}}{\partial X_{i j}} \\
& =\sum_{a b} \nabla Z_{a b} \frac{\partial\left(\frac{X_{a b}}{Y_{a b}}\right)}{\partial X_{i j}} \\
& =\nabla Z_{i j} \frac{\partial\left(\frac{X_{i j}}{Y_{i j}}\right)}{\partial X_{i j}} \\
\nabla X_{i j} & =\nabla Z_{i j} \frac{1}{Y_{i j}} \\
\nabla Y_{i j} & =\nabla Z_{i j}\left(-\frac{X_{i j}}{Y_{i j}^{2}}\right) \\
\nabla X & =\frac{\nabla Z}{Y} \\
\nabla Y & =-\frac{\nabla Z \odot X}{Y^{\cdot 2}}
\end{array}
$$
where $Y^{\cdot 2}$ is the elementwise application of $f(x) = x^2$.
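As a quick sanity check (illustrative only, not part of the assignment code), the result can be verified in plain numpy with central finite differences, using the arbitrary loss $l = \sum_{ij} Z_{ij}$ so that $\nabla Z$ is a matrix of ones:
```python
import numpy as np

# finite-difference check of the Q1 gradients with l = sum(X / Y)
rng = np.random.default_rng(0)
X = rng.normal(size=(3, 4))
Y = rng.uniform(1.0, 3.0, size=(3, 4))       # keep Y away from zero

gZ = np.ones_like(X)                          # upstream gradient dl/dZ
gX = gZ / Y                                   # derived: grad X = grad Z / Y
gY = -gZ * X / Y ** 2                         # derived: grad Y = -(grad Z * X) / Y^2

def num_grad(f, A, eps=1e-6):
    """Central-difference gradient of the scalar f with respect to every entry of A."""
    G = np.zeros_like(A)
    for idx in np.ndindex(A.shape):
        Ap, Am = A.copy(), A.copy()
        Ap[idx] += eps
        Am[idx] -= eps
        G[idx] = (f(Ap) - f(Am)) / (2 * eps)
    return G

assert np.allclose(gX, num_grad(lambda A: (A / Y).sum(), X), atol=1e-4)
assert np.allclose(gY, num_grad(lambda A: (X / A).sum(), Y), atol=1e-4)
```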
## Q2
$$
\begin{aligned} F: X \rightarrow Y, \quad Y_{i j k} &=f\left(X_{i j k}\right) \\ \nabla X_{i j k}=\frac{\partial l}{\partial X_{i j k}} &=\sum_{a b c} \frac{\partial l}{\partial Y_{a b c}} \frac{\partial Y_{a b c}}{\partial X_{i j k}} \\ &=\sum_{a b c} \nabla Y_{a b c} \frac{\partial Y_{a b c}}{\partial X_{i j k}} \\ &=\sum_{a b c} \nabla Y_{a b c} \frac{\partial f\left(X_{a b c}\right)}{\partial X_{i j k}} \\ &=\nabla Y_{i j k} \frac{\partial f\left(X_{i j k}\right)}{\partial X_{i j k}} \\ &=\nabla Y_{i j k} f^{\prime}\left(X_{i j k}\right) \quad \forall f\end{aligned}
$$
The vectorized operation then depends on $f$, of course.
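Again just as an illustration, a spot check with $f = \tanh$ (so $f'(x) = 1 - \tanh^2 x$) and $l = \sum f(X)$:
```python
import numpy as np

# spot check of Q2 on a 3-tensor with f = tanh and l = sum(f(X)), so grad Y is all ones
rng = np.random.default_rng(1)
X = rng.normal(size=(2, 3, 4))
gY = np.ones_like(X)
gX = gY * (1 - np.tanh(X) ** 2)      # derived: grad X = grad Y * f'(X)

eps = 1e-6
idx = (1, 2, 3)                      # check a single entry against central differences
Xp, Xm = X.copy(), X.copy()
Xp[idx] += eps
Xm[idx] -= eps
num = (np.tanh(Xp).sum() - np.tanh(Xm).sum()) / (2 * eps)
assert np.isclose(gX[idx], num, atol=1e-4)
```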
## Q3
$$
W \in \mathbb{R}^{f \times m},
X \in \mathbb{R}^{n \times f},
Z \leftarrow X \circ W \in \mathbb{R}^{n \times m}
$$
$$
\begin{aligned} \nabla W_{i j}=\frac{\partial l}{\partial W_{i j}} &=\sum_{n m} \frac{\partial l}{\partial Z_{n m}} \frac{\partial Z_{n m}}{\partial W_{i j}} \\ &=\sum_{n m} \nabla Z_{n m} \frac{\partial Z_{n m}}{\partial W_{i j}} \\ &=\sum_{n m} \nabla Z_{n m} \frac{\partial \Sigma_{f} X_{n f} W_{f m}}{\partial W_{i j}} \\ &=\sum_{n} \nabla Z_{n j} \frac{\partial X_{n i} W_{i j}}{\partial W_{i j}} \\ \nabla W_{i j} &=\sum_{n} \nabla Z_{n j} X_{n i} \\ \nabla W &=X^{\top} \circ \nabla Z \end{aligned}
$$
and
$$
\begin{aligned} \nabla X_{i j} &=\sum_{n m} \nabla Z_{n m} \frac{\partial Z_{n m}}{\partial X_{i j}} \\ &=\sum_{n m} \nabla Z_{n m} \frac{\partial \sum_{f} X_{n f} W_{f m}}{\partial X_{i j}} \\ &=\sum_{m} \nabla Z_{i m} \frac{\partial X_{i j} W_{j m}}{\partial X_{i j}} \\ \nabla X_{i j} &=\sum_{m} \nabla Z_{i m} W_{j m} \\ \nabla X &= \nabla Z \cdot W^{\top} \end{aligned}
$$
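A small numpy check of both results (illustrative only), with $l = \sum_{nm} Z_{nm}$ so that $\nabla Z$ is all ones:
```python
import numpy as np

# check grad W = X^T @ grad Z and grad X = grad Z @ W^T for l = sum(X @ W)
rng = np.random.default_rng(2)
n, f, m = 4, 3, 5
X = rng.normal(size=(n, f))
W = rng.normal(size=(f, m))

gZ = np.ones((n, m))
gW = X.T @ gZ                        # derived formula for grad W
gX = gZ @ W.T                        # derived formula for grad X

eps = 1e-6
# spot-check one entry of each gradient with central differences
Wp, Wm = W.copy(), W.copy()
Wp[1, 2] += eps; Wm[1, 2] -= eps
assert np.isclose(gW[1, 2], ((X @ Wp).sum() - (X @ Wm).sum()) / (2 * eps), atol=1e-4)
Xp, Xm = X.copy(), X.copy()
Xp[0, 1] += eps; Xm[0, 1] -= eps
assert np.isclose(gX[0, 1], ((Xp @ W).sum() - (Xm @ W).sum()) / (2 * eps), atol=1e-4)
```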
## Q4
$$
f: \mathbb{R}^{n} \rightarrow \mathbb{R}^{n \times m}
$$
$$
f(X)=\underset{(n \times 1)}{X} \underset{(1 \times 16)}{\mathbb{1}^{\top}} \to Z \in \mathbb{R}^{n \times 16}
$$
$$
\begin{aligned} \nabla X_{i} &=\sum_{a b} \frac{\partial l}{\partial Z_{a b}} \frac{\partial Z_{a b}}{\partial X_{i}} \\ &=\sum_{a b} \nabla Z_{a b} \frac{\partial Z_{a b}}{\partial X_{i}} \\ &=\sum_{a b} \nabla Z_{a b} \frac{\partial\left[X \mathbb{1}^{\top} \right]_{a b}}{\partial X_{i}} \\ &=\sum_{a b} \nabla Z_{a b} \frac{\partial X_{a} \cdot \mathbb{1}^{\top}_{b}}{\partial X_{i}} \\ &=\sum_{b} \nabla Z_{i b} \frac{\partial X_{i} \mathbb{1}^{\top}_{b}}{\partial X_{i}} \\ \nabla X_{i} &=\sum_{b} \nabla Z_{i b} \quad \mathbb{1}^{\top}_{b} \\ \nabla X &= \nabla Z \mathbb{1} \end{aligned}
$$
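In numpy terms the forward pass is a broadcast of the column vector to $n \times 16$ and the backward pass is simply a row sum of $\nabla Z$; a tiny illustrative check:
```python
import numpy as np

# expand x (n x 1) to Z = x @ 1^T (n x m); the backward pass is grad x = grad Z @ 1 (row sums)
rng = np.random.default_rng(3)
n, m = 5, 16
x = rng.normal(size=(n, 1))
gZ = rng.normal(size=(n, m))                    # arbitrary upstream gradient

gx = gZ @ np.ones((m, 1))                       # derived backward pass

# finite-difference check with l = sum(gZ * (x @ 1^T))
loss = lambda v: (gZ * (v @ np.ones((1, m)))).sum()
eps = 1e-6
num = np.zeros((n, 1))
for i in range(n):
    xp, xm = x.copy(), x.copy()
    xp[i, 0] += eps
    xm[i, 0] -= eps
    num[i, 0] = (loss(xp) - loss(xm)) / (2 * eps)
assert np.allclose(gx, num, atol=1e-4)
```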
## Q5
```
import vugrad as vg
import numpy as np
a = vg.TensorNode(np.random.randn(2, 2))
b = vg.TensorNode(np.random.randn(2, 2))
c = a + b
```
### 5.1
c.value is the value of the actual array that results from the operation. In this case elementwise addition of a and b.
### 5.2
c.source refers to how c was created. A source contains both the input objects and the operation, i.e. add(a, b).
### 5.3
c.source.inputs[0] must be a.
```
c.source.inputs[0] is a
```
### 5.4
At this point c.grad should simply contain a placeholder for the gradient, possibly NaNs or zeros.
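Purely as an illustration (assuming the attributes behave as described in the answers above; no actual outputs are claimed here), one could inspect them like this:
```python
print(c.value)                  # the raw numpy array produced by the addition
print(c.source.inputs[0] is a)  # True: the first input node of the op that created c
print(c.grad)                   # still just a placeholder until a backward pass runs
```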
## Q6
### 6.1
There's the Op class, which defines the operation under the hood. We don't need to explicitly instantiate it, but we refer to it when we create OpNodes.
### 6.2
In vugrad the `+` operator has been overloaded to do tensor (elementwise) addition of node objects instead of the scalar addition that is standard in Python. From line 104 of core.py onwards we can see how the different built-in operators have been overloaded, and in line 317 an add class is additionally defined (inheriting from Op, of course).
### 6.3
The actual output is computed as we move upstream through the computation graph: the do_forward method of the Op class is called when the forward pass is performed on the entire computation graph. The code can be found in lines 217-249.
## Q7
The backward function is called on the TensorNode as soon as we collect gradients by calling loss.backward(), which triggers the cascade of backward() calls on its parents and so forth (line 97).
## Q8
a. NORMALIZE
Let
$$
X \in \mathbb{R}^{m \times n} \\
Z = \frac{X}{X \mathbb{1}}
$$
Then
$$
\begin{aligned}
X_{i j}^{\nabla} &=\sum_{a b} \frac{\partial l}{\partial Z_{a b}} \cdot \frac{\partial Z_{a b}}{\partial X_{i j}}=\sum_{a b} Z_{a b}^{\nabla} \frac{\partial \frac{X_{a b}}{\sum_{c} X_{a c}}}{\partial X_{i j}}=\sum_{b} Z_{i b}^{\nabla} \cdot \frac{\partial \frac{X_{i b}}{\sum_{c} X_{i c}}}{\partial X_{i j}} \\
&=\sum_{b} Z_{i b}^{\nabla}\left(\frac{\delta_{b j}}{\sum_{c} X_{i c}}-\frac{X_{i b}}{\left(\sum_{c} X_{i c}\right)^{2}}\right)=\sum_{b} Z_{i b}^{\nabla} \frac{\delta_{b j}}{\sum_{c} X_{i c}}-\sum_{b} Z_{i b}^{\nabla} \cdot \frac{X_{i b}}{\left(\sum_{c} X_{i c}\right)^{2}} \\
&=\frac{Z_{i j}^{\nabla}}{\sum_{c} X_{i c}}-\sum_{b} Z_{i b}^{\nabla} \cdot \frac{X_{i b}}{\left(\sum_{c} X_{i c}\right)^{2}} \\
X^{\nabla} &=\frac{Z^{\nabla}}{X \cdot \mathbb{1}}-\left(\frac{Z^{\nabla} \odot X}{(X \cdot \mathbb{1})^{\circ 2}}\right) \cdot \mathbb{1}
\end{aligned}
$$
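Illustrative finite-difference check of the vectorised NORMALIZE backward pass (with $l = \sum \nabla Z \odot Z$ for a random $\nabla Z$, and positive $X$ so the row sums stay away from zero):
```python
import numpy as np

# Z = X / rowsum(X); check grad X = grad Z / S - rowsum(grad Z * X) / S^2, with S = X @ 1
rng = np.random.default_rng(4)
X = rng.uniform(0.5, 2.0, size=(3, 4))
gZ = rng.normal(size=(3, 4))

S = X.sum(axis=1, keepdims=True)
gX = gZ / S - (gZ * X).sum(axis=1, keepdims=True) / S ** 2

def loss(X_):
    Z = X_ / X_.sum(axis=1, keepdims=True)
    return (gZ * Z).sum()

eps = 1e-6
num = np.zeros_like(X)
for idx in np.ndindex(X.shape):
    Xp, Xm = X.copy(), X.copy()
    Xp[idx] += eps
    Xm[idx] -= eps
    num[idx] = (loss(Xp) - loss(Xm)) / (2 * eps)
assert np.allclose(gX, num, atol=1e-4)
```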
b. EXPAND-SINGLETON
input
$$
X \in \mathbb{R}^{a \times b \times ... \times 1 \times ... }
$$
number of repeated expansions of the singleton dimension: $K$.
$$
f: \mathbb{R}^{n \times 1} \rightarrow \mathbb{R}^{n \times K}
$$
$$
f(X)=\underset{(n \times 1)}{X} \underset{(1 \times K)}{\mathbb{1}^{\top}} \to Z \in \mathbb{R}^{n \times K}
$$
$$
\begin{aligned} \nabla X_{i} &=\sum_{a b} \frac{\partial l}{\partial Z_{a b}} \frac{\partial Z_{a b}}{\partial X_{i}} \\ &=\sum_{a b} \nabla Z_{a b} \frac{\partial Z_{a b}}{\partial X_{i}} \\ &=\sum_{a b} \nabla Z_{a b} \frac{\partial\left[X \mathbb{1}^{\top} \right]_{a b}}{\partial X_{i}} \\ &=\sum_{a b} \nabla Z_{a b} \frac{\partial X_{a} \cdot \mathbb{1}^{\top}_{b}}{\partial X_{i}} \\ &=\sum_{b} \nabla Z_{i b} \frac{\partial X_{i} \mathbb{1}^{\top}_{b}}{\partial X_{i}} \\ \nabla X_{i} &=\sum_{b} \nabla Z_{i b} \quad \mathbb{1}^{\top}_{b} \\ \nabla X &= \nabla Z \mathbb{1} \end{aligned}
$$
c. EXPAND-SCALAR: a special case of expand, where a single scalar is expanded to any shape. It's not implemented in vugrad.
$$
\begin{array}{c}
s \gets \left(a, b, c, \ldots \right) \\
f(x, s)=Z \in \mathbb{R}^{s}, Z_{a b c ...}=x \ \forall \ a b c ... \ \in s \\
\nabla x=\sum_{a b} \nabla Z_{a b} \frac{\partial Z_{a b}}{\partial x} \\
\nabla x=\sum_{a b} \nabla Z_{a b} \frac{\partial x}{\partial x} \\
\nabla x=\sum_{a b} \nabla Z_{a b} \\
\nabla x=\mathbb{1}^{\top} \, \nabla Z \, \mathbb{1}
\end{array}
$$
## Q9
```Python
# The operation
class relu(Op):
@staticmethod
def forward(context, input):
        relu_x = np.fmax(input, np.zeros(input.shape))
context['relu_x'] = relu_x
return relu_x
@staticmethod
def backward(context, goutput):
relu_x = context['relu_x']
return goutput * np.where(relu_x > 0, 1, 0)
# The wrapper
def relu_(x):
    return relu.do_forward(x)
```
Using a learning rate of `10e-6`, both the sigmoid and the relu achieve a validation accuracy of 93-96% **after a single epoch** (n = 10 runs). To be exact:

- sigmoid: 93.32 (+- 1.98)
- relu: 96.01 (+- 1.42)

When I continue training (5 epochs), I observe an increase up to about 98% accuracy for the relu and 96% for the sigmoid. To summarize, the relu is always 2-3% better in terms of validation accuracy gain per epoch. However, I didn't notice any training speedup, which I had expected.
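As a quick standalone sanity check of the backward rule used in the relu Op above (plain numpy, independent of the Op plumbing), a central finite difference recovers the same elementwise derivative:
```python
import numpy as np

# standalone check of the relu backward rule: grad_in = grad_out * 1[x > 0]
rng = np.random.default_rng(5)
x = rng.normal(size=(4, 3))
g_out = rng.normal(size=(4, 3))                  # some upstream gradient

relu_x = np.fmax(x, np.zeros_like(x))
g_in = g_out * np.where(relu_x > 0, 1, 0)        # same rule as relu.backward above

eps = 1e-6
num_deriv = (np.fmax(x + eps, 0) - np.fmax(x - eps, 0)) / (2 * eps)   # elementwise d relu / dx
assert np.allclose(g_in, g_out * num_deriv, atol=1e-4)
```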
## Q10
Since the model is already very good, yielding a validation accuracy of 98% after just a couple of epochs, there is not much I can experiment with to make it better. What I can do instead is try to decrease the number of parameters in a way that doesn't hurt performance, without using a CNN. Right now the model has 2,493,130 parameters. By adding more layers and using skip connections I can downsize the model significantly. With
```python
self.layer1 = vg.Linear(input_size, hidden_size)
self.layer2 = vg.Linear(hidden_size, hidden_size)
self.layer3 = vg.Linear(hidden_size, hidden_size)
self.layer4 = vg.Linear(hidden_size, hidden_size)
self.layer5 = vg.Linear(hidden_size, hidden_size)
```
and doing the forward like this :
```python
Z1 = self.layer1(input)
A1 = vg.relu_(Z1)
Z2 = self.layer2(A1)
A2 = vg.relu_(Z2 + A1)
Z3 = self.layer3(A2)
A3 = vg.relu_(Z3 + A2)
Z4 = self.layer4(A3)
A4 = vg.relu_(Z4 + A3)
Z5 = self.layer5(A4)
A5 = output = vg.logsoftmax(Z5)
```
I end up with a total of 4 weight sets and 49,450 parameters; compared to the default model with 2,493,130 parameters, my model is about 50 times smaller and trains at least 5 times faster.
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
mynet = pd.read_csv("result.csv",
index_col=0)[["loss","accuracy", "epoch"]]
default = pd.read_csv("result0.csv",
index_col=0)[["loss","accuracy", "epoch"]]
mynet["model"] = "tiny"
default["model"] = "default"
df = mynet.append(default).reset_index(drop=True)
fig, axes = plt.subplots(1,2,sharey=False, figsize=(12,7))
sns.lineplot(data=df[df.epoch >= 1], x="epoch", y="loss", hue="model",ax=axes[0])
sns.lineplot(data=df[df.epoch >= 1], x="epoch", y="accuracy", hue="model",ax=axes[1])
```
We can see that my tiny model has slightly worse accuracy (about -1%; 10 runs with 20 epochs each), but it has 50x fewer parameters and trains much faster. Note that epoch 1 was omitted from the chart.
## Q11
I changed the default hyperparameter configuration by looking at different values of the learning rate, batch size and number of epochs. The results are not particularly interesting, as we see the expected behaviour. Row 0 represents the default config (nothing changed, just using Adam instead of SGDM).
```
lr_results
batch_results
epoch_results
```
Compared to the default hyperparams, increasing the number of epochs as well as using a smaller learning rate might be a good idea.
## Q12
The network is already quite efficient in terms of size due to the use of specialized convolutional layers. I added some standard tricks to the architecture: dropout in the FC layers and batch norm across the whole model.
```python
class myNet(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(3, 6, 5),
nn.ReLU(),
nn.BatchNorm2d(6),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 16, 5),
nn.ReLU(),
nn.BatchNorm2d(16),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(16 * 5 * 5, 120),
nn.ReLU(),
nn.BatchNorm1d(120),
nn.Dropout(0.5),
nn.Linear(120, 84),
nn.ReLU(),
nn.BatchNorm1d(84),
nn.Linear(84, 10))
def forward(self, x):
return self.layers(x)
```
```
# df1, df2 and accuracy_results hold the per-run results of the two models (collected in cells not shown here)
df = df1.append(df2).reset_index(drop=True)
import matplotlib.pyplot as plt, seaborn as sns
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14,6))
sns.lineplot(data=df, x="epoch", y="loss", hue="model",ax=axes[0])
accuracy_results.boxplot(ax=axes[1])
```
For a total of 5 runs with 10 epochs each, above we see the learning curves as well as the test accuracy distributions. My model is significantly worse than the default model, although it uses batch norm and dropout. I guess a dropout rate of 0.5 is simply making the model underfit; 0.1, or simply doubling the number of epochs, might give a different picture.
#Using deep features to build an image classifier
#Fire up GraphLab Create
```
import graphlab
```
#Load a common image analysis dataset
We will use a popular benchmark dataset in computer vision called CIFAR-10.
(We've reduced the data to just 4 categories = {'cat','bird','automobile','dog'}.)
This dataset is already split into a training set and test set.
```
image_train = graphlab.SFrame('image_train_data/')
image_test = graphlab.SFrame('image_test_data/')
```
#Exploring the image data
```
graphlab.canvas.set_target('ipynb')
image_train['image'].show()
image_train.show()
```
#Train a classifier on the raw image pixels
We first start by training a classifier on just the raw pixels of the image.
```
raw_pixel_model = graphlab.logistic_classifier.create(image_train,target='label',
features=['image_array'])
```
#Make a prediction with the simple model based on raw pixels
```
image_test[0:3]['image'].show()
image_test[0:3]['label']
raw_pixel_model.predict(image_test[0:3])
```
The model makes wrong predictions for all three images.
#Evaluating raw pixel model on test data
```
raw_pixel_model.evaluate(image_test)
```
The accuracy of this model is poor: it gets only about 46%.
#Can we improve the model using deep features
We only have 2005 data points, so it is not possible to train a deep neural network effectively with so little data. Instead, we will use transfer learning: using deep features trained on the full ImageNet dataset, we will train a simple model on this small dataset.
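(As a library-agnostic sketch of the same idea, not the GraphLab API: given a matrix of precomputed deep features, a simple linear classifier can be trained on top of it, for example with scikit-learn. The feature matrix below is a random placeholder standing in for the deep_features column, and the sizes are assumptions for illustration only.)
```
# Library-agnostic sketch of transfer learning on precomputed deep features (not GraphLab code).
# The features here are random placeholders; in practice they would come from a pretrained network.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

n_images, n_deep_features = 2005, 4096                 # assumed sizes, for illustration only
rng = np.random.RandomState(0)
deep_X = rng.randn(n_images, n_deep_features)          # stand-in for the precomputed deep features
labels = rng.choice(['cat', 'bird', 'automobile', 'dog'], size=n_images)

X_tr, X_te, y_tr, y_te = train_test_split(deep_X, labels, test_size=0.2, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
print('held-out accuracy:', clf.score(X_te, y_te))     # ~chance here, since the features are random
```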
```
len(image_train)
```
##Computing deep features for our images
The two lines below allow us to compute deep features. This computation takes a little while, so we have already computed them and saved the results as a column in the data you loaded.
(Note that if you would like to compute such deep features and have a GPU on your machine, you should use the GPU enabled GraphLab Create, which will be significantly faster for this task.)
```
#deep_learning_model = graphlab.load_model('http://s3.amazonaws.com/GraphLab-Datasets/deeplearning/imagenet_model_iter45')
#image_train['deep_features'] = deep_learning_model.extract_features(image_train)
```
As we can see, the column deep_features already contains the pre-computed deep features for this data.
```
image_train.head()
```
#Given the deep features, let's train a classifier
```
deep_features_model = graphlab.logistic_classifier.create(image_train,
features=['deep_features'],
target='label')
```
#Apply the deep features model to first few images of test set
```
image_test[0:3]['image'].show()
deep_features_model.predict(image_test[0:3])
```
The classifier with deep features gets all of these images right!
#Compute test_data accuracy of deep_features_model
As we can see, deep features provide us with significantly better accuracy (about 78%)
```
deep_features_model.evaluate(image_test)
```
```
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
%matplotlib inline
```
### Introduction
By using this dataset, I wish to explore what impacts Black Friday sales. Black Friday is the official kick-off to the holiday season and one of the most important times of the year for a retailer or wholesaler, so it is important to understand what drives user buying behaviour. Using these insights, this blog post looks at how a business can improve its sales.
I will try to answer the following questions:
Question 1: A quick analysis of most spending users to find out who has the highest CLV?
Question 2: How is the distribution of users across age and gender?
Question 3: Which products garner the maximum sales during black friday?
Question 4: How are the users spread across cities and occupation?
Question 5: Correlation between Gender, Age, Occupation, City_Category, Stay_In_Current_City_Years, Marital_Status, Product_Category_x vs Purchase
### Data Understanding
This project will use Black Friday Dataset From Kaggle, which is a sample of the transactions made in a retail store.
Below are the steps to look at and understand the dataset.
```
# Read in the Complete Dataset
BlackFriday_Dataset = pd.read_csv('./BlackFriday.csv')
BlackFriday_Dataset.head()
# Get the Basic info of the dataset
BlackFriday_Dataset.describe()
BlackFriday_Dataset.info()
#Provide the number of rows in the dataset
num_rows = BlackFriday_Dataset.shape[0]
#Provide the number of columns in the dataset
num_cols = BlackFriday_Dataset.shape[1]
print("No of rows: {}".format(num_rows))
print("Number of Columns: {}".format(num_cols))
# To check the column names in the dataset
BlackFriday_Dataset.columns
```
### Prepare Data
Some data preparation steps need to be done before using the dataset for exploration, including:
1. Checking columns with missing values and analyze impact
2. Dealing with missing values
3. Encoding categorical variables such as Gender, Age, City_Category and Stay_In_Current_City_Years
```
# Data Preparation Step 1: check how many missing values are in the dataset
BlackFriday_Dataset.isnull().sum()
```
After checking, missing values only exist in the columns "Product_Category_2" and "Product_Category_3". From the description of the dataset, the minimum values of both columns are non-zero. My understanding is that a missing value in these columns means the customer didn't purchase products in those categories, so we can use 0 to fill in the missing values.
```
# Data Preparation Step 2: Fill the missing cells with zero
# (fillna returns a new DataFrame unless inplace=True, so assign the result back)
BlackFriday_Dataset = BlackFriday_Dataset.fillna(0)
# Data Preparation Step 3: Encode the categorical variables as integer codes
# Label-encode the features: Gender, Age, City_Category, Stay_In_Current_City_Years
le = LabelEncoder()
BlackFriday_Dataset['Gender_onehot_encode'] = le.fit_transform(BlackFriday_Dataset['Gender'].astype(str))
BlackFriday_Dataset['Age_onehot_encode'] = le.fit_transform(BlackFriday_Dataset['Age'].astype(str))
BlackFriday_Dataset['City_Category_onehot_encode'] = le.fit_transform(BlackFriday_Dataset['City_Category'].astype(str))
BlackFriday_Dataset['Stay_In_Current_City_Years_encode'] = le.fit_transform(BlackFriday_Dataset['Stay_In_Current_City_Years'].astype(str))
BlackFriday_Dataset.head()
```
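Strictly speaking, LabelEncoder produces integer codes rather than true one-hot columns. If genuine one-hot encoding is wanted, pandas.get_dummies is one option; a minimal sketch (the column selection follows the dataframe loaded above, and the prefix names are only illustrative):
```
# Sketch of true one-hot encoding for the same categorical columns
onehot_cols = pd.get_dummies(
    BlackFriday_Dataset[['Gender', 'Age', 'City_Category', 'Stay_In_Current_City_Years']],
    prefix=['Gender', 'Age', 'City', 'Stay'])
# keep the original columns and append the one-hot columns alongside them
BlackFriday_Dataset_onehot = pd.concat([BlackFriday_Dataset, onehot_cols], axis=1)
BlackFriday_Dataset_onehot.head()
```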
### Answer Questions base on dataset
I have come up some question to be answered by the Data exploration
```
# Question 1: Which User spent most during black Friday, list the top 20 spending users
plt.figure(figsize = (20,8))
BlackFriday_Dataset.groupby('User_ID')['Purchase'].sum().nlargest(20).sort_values().plot('barh')
```
It's important for the seller to identify high-quality customers: customers with a higher purchase amount should be valued. Understanding the needs of these customers will help the merchant make better operational decisions on product type, pricing, after-sales service, etc. Loyalty programs and targeted advertisements should be used to keep these customers shopping with the merchant.
```
# Question 2: How about the User Distribution by Age Group? And also consider Gender
plt.figure(figsize = (20,8))
sns.countplot(BlackFriday_Dataset['Age'])
plt.figure(figsize = (20,8))
sns.countplot(BlackFriday_Dataset['Age'],hue=BlackFriday_Dataset['Gender'])
```
We can see from the plot that most of the users who participate in the Black Friday sale are from the age groups 26-35, 36-45 and 18-25, which is reasonable as these customers are in the prime of their lives: they make more money than other age groups and also have more shopping needs.
From the second plot, we can see that in every age group male customers shop more than female customers. I think this is because the most worthwhile things to buy on Black Friday are electronics, small appliances and game consoles; for Apple products, especially the iPad, Black Friday prices are the best of the year. Such products tend to be more popular with male customers.
```
# Question 3: Which products are most popular during Black Friday, list the top 20
plt.figure(figsize = (20,8))
BlackFriday_Dataset.groupby('Product_ID')['Purchase'].count().nlargest(20).sort_values().plot('barh')
```
Listing the most popular products may help the merchant adjust their business strategy and prepare better for the next shopping season, increasing revenue and profit.
```
# Question 4: Look at the users again, this time focus on group by Occupation in different city
plt.figure(figsize = (20,8))
sns.countplot(BlackFriday_Dataset['Occupation'], hue = BlackFriday_Dataset["City_Category"])
```
The plot shows that for almost all occupation categories, users from City B did more shopping than users from City A and City C. I think the reason is that City B is larger than City A and City C and thus has a larger population. Customers from occupations 0, 4 and 7 did more shopping than other occupations.
```
# Question 5: Correlation between Gender, Age, Occupation, City_Category, Stay_In_Current_City_Years, Marital_Status, Product_Category_x vs Purchase
Correlation_DF = BlackFriday_Dataset[['Gender_onehot_encode', 'Age_onehot_encode', 'Occupation', 'City_Category_onehot_encode',
'Stay_In_Current_City_Years_encode', 'Marital_Status', 'Product_Category_1', 'Product_Category_2', 'Product_Category_3', 'Purchase']]
Correlation_DF.corr()
colormap = plt.cm.inferno
plt.figure(figsize=(16,12))
plt.title('Correlation between Gender, Age, Occupation, City_Category, Stay_In_Current_City_Years, Marital_Status, Product_Category_x vs Purchase', y=1.05, size=15)
sns.heatmap(Correlation_DF.corr(),linewidths=0.1,vmax=1.0,
square=True, cmap=colormap, linecolor='white', annot=True)
```
From the correlation heatmap above, we can conclude that Gender and City_Category are the most positively correlated with Purchase among the features, while all Product_Category features are negatively correlated with Purchase. Marital_Status and Stay_In_Current_City_Years are not very important features with respect to Purchase. All three Product_Category features are highly correlated with each other. We can also see that Marital_Status is strongly related to Age, which is quite reasonable.
# Benchmark
```
%pip install --upgrade https://github.com/remifan/commplax/archive/master.zip
%pip install --upgrade https://github.com/remifan/LabPtPTm2/archive/master.zip
%pip install --upgrade https://github.com/remifan/gdbp_study/archive/master.zip
import os
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from functools import partial
import matplotlib.pyplot as plt
from commplax import util
from gdbp import gdbp_base as gb, data as gdat, aux
aux.dl_trained_params() # download pre-trained parameters
loc_trained_params = './trained_params/pretrained'
LP = np.array([-4, -2, -1, 0, 1]) # launched power in dBm
# use Pandas.Dataframe to store the results
df_test_res = pd.DataFrame({c: pd.Series(dtype=t) for c, t in {'ChInd': 'int',
'LPdBm': 'float',
'Model': 'str',
'Q': 'float'}.items()})
# check `gdbp_study-master/src/gdbp_base.py` for model definition
def init_models(data: gdat.Input, **kwargs):
''' make CDC and DBP's derivatives
(all methods have a trainable R-filter)
cdc: static D-filter, no NLC
dbp: static D-filter, scalar manually optimized NLC factor
fdbp: static D-filter, static N-filter scaled by manually optimized NLC factor
edbp: static D-filter, tap-by-tap optimizable/trainable N-filter
gdbp: tap-by-tap optimizable/trainable D-filter and N-filter
'''
mode = kwargs.get('mode', 'train')
steps = kwargs.get('steps', 3)
dtaps = kwargs.get('dtaps', 261)
ntaps = kwargs.get('ntaps', 41)
rtaps = kwargs.get('rtaps', 61)
xi = kwargs.get('xi', 1.1) # optimal xi for FDBP
fdbp_init = partial(gb.fdbp_init, data.a, steps=steps)
model_init = partial(gb.model_init, data)
comm_conf = {'mode': mode, 'steps': steps, 'dtaps': dtaps, 'rtaps': rtaps} # common configurations
# init. func.| define model structure parameters and some initial values | define static modules | identifier
cdc = model_init({**comm_conf, 'ntaps': 1, 'init_fn': fdbp_init(xi=0.0)}, [('fdbp_0',)], name='CDC')
dbp = model_init({**comm_conf, 'ntaps': 1, 'init_fn': fdbp_init(xi=0.15)}, [('fdbp_0',)], name='DBP')
fdbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [('fdbp_0',)], name='FDBP')
edbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [('fdbp_0', r'DConv_\d')], name='EDBP')
gdbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [], name='GDBP')
return cdc, dbp, fdbp, edbp, gdbp
def load_data(ch: int):
''' build pairs of datasets for training and testing '''
ds_train = gdat.load(1, LP, ch, 2)
ds_test = gdat.load(2, LP, ch, 1)
return [(ds_tr, ds_te) for ds_tr, ds_te in zip(ds_train, ds_test)]
def sweep_channel(ch: int,
df_test_res=df_test_res,
use_pretrained_params=False,
save_params=False,
save_subdirname='regular_taps',
model_init_kwargs={}):
''' sweep launched power of given channel '''
util.clear_xla_cache() # release JAX's cache to save RAM
# iterate data of target channel
for i, chds in enumerate(tqdm(load_data(ch),
desc=f'sweeping launch power on Ch#{ch}',
leave=False)):
ds_train, ds_test = chds
models_train = init_models(ds_train, **model_init_kwargs)
models_test = init_models(ds_test, mode='test', **model_init_kwargs)
# iterate models
for j, (model_train, model_test) in enumerate(tqdm(zip(models_train, models_test),
desc='iterating models',
leave=False,
total=len(models_train))):
params_file = os.path.join(loc_trained_params,
'snr_vs_lp',
save_subdirname,
'params_%d_%d_%d' % (ch, i, j)) # params_{ch}_{lp}_{mod}
if use_pretrained_params:
params = util.load_variable(params_file)
else:
# use trained params of the 3rd last batch, as tailing samples are corrupted by CD
params_queue = [None] * 3
# iterate the training steps
for _, p, _ in gb.train(model_train, ds_train, n_iter=2000):
params_queue.append(p)
params = params_queue.pop(0)
if save_params:
util.save_variable(params, params_file)
test_Q = gb.test(model_test, params, ds_test)[0].QSq.total
# collect result
df_test_res = df_test_res.append({'ChInd': ch,
'LPdBm': ds_test.a['lpdbm'],
'Model': model_test.name,
'Q': test_Q},
ignore_index=True)
return df_test_res
# it may take a while to finish
kwargs = {'save_subdirname': 'regular_taps',
'use_pretrained_params': True, # use pretrained parameters to save time
'save_params': False} # save trained parameters after training
for ch in tqdm(1 + np.arange(7), desc='sweeping channels'):
df_test_res = sweep_channel(ch, df_test_res, **kwargs)
df_test_res
# save results
df_test_res.to_csv('benchmark_regular_taps.csv', index=False)
```
Now we visualize the results by manipulating the results table, see [Pandas.Dataframe](https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html) for instructions.
```
grp_ch = df_test_res.loc[df_test_res['ChInd'].isin([1, 4])].groupby('ChInd')
for n_ch, g_ch in grp_ch:
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
for n_mod, g_mod in g_ch.groupby('Model', sort=False):
ax.plot(g_mod.LPdBm, g_mod.Q, '-o', label=n_mod)
ax.legend(loc='upper left')
ax.set_title(f'Ch#{n_ch}')
ax.set_xlabel('Launched Power (dBm)')
ax.set_ylabel('Q-factor (dB)')
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
fmt = ['o', '^', '<', 's', '*']
grp_opt_Q = df_test_res.groupby(['ChInd', 'Model'], as_index=False, sort=False)['Q'] \
.max().groupby('Model', sort=False)
for f, (n, g) in zip(fmt, grp_opt_Q):
ax.plot(g.ChInd, g.Q, f, label=n)
ax.legend()
ax.set_xlabel('channel index')
ax.set_ylabel('Q-factor (dB)')
ax.set_ylim([8.15, 9.5])
ax.set_title(r'Optimal $Q$-factor')
# it may take a while to finish
kwargs = {'save_subdirname': 'few_taps',
'use_pretrained_params': True, # use pretrained parameters to save time
'save_params': False,
'model_init_kwargs': {'dtaps': 221, 'ntaps': 11, 'xi': 0.5}}
df_test_res_ft = sweep_channel(4, df_test_res, **kwargs)
df_test_res_ft
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
for n_mod, g_mod in df_test_res_ft.groupby('Model', sort=False):
ax.plot(g_mod.LPdBm, g_mod.Q, '-o', label=n_mod)
ax.legend(loc='upper left')
ax.set_title(f'Ch4 (fewer taps)')
ax.set_xlabel('Launched Power (dBm)')
ax.set_ylabel('Q-factor (dB)')
```
|
github_jupyter
|
%pip install --upgrade https://github.com/remifan/commplax/archive/master.zip
%pip install --upgrade https://github.com/remifan/LabPtPTm2/archive/master.zip
%pip install --upgrade https://github.com/remifan/gdbp_study/archive/master.zip
import os
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from functools import partial
import matplotlib.pyplot as plt
from commplax import util
from gdbp import gdbp_base as gb, data as gdat, aux
aux.dl_trained_params() # download pre-trained parameters
loc_trained_params = './trained_params/pretrained'
LP = np.array([-4, -2, -1, 0, 1]) # launched power in dBm
# use Pandas.Dataframe to store the results
df_test_res = pd.DataFrame({c: pd.Series(dtype=t) for c, t in {'ChInd': 'int',
'LPdBm': 'float',
'Model': 'str',
'Q': 'float'}.items()})
# check `gdbp_study-master/src/gdbp_base.py` for model definition
def init_models(data: gdat.Input, **kwargs):
''' make CDC and DBP's derivatives
(all methods has trainable R-filter)
cdc: static D-filter, no NLC
dbp: static D-filter, scalar manually optimized NLC factor
fdbp: static D-filter, static N-filter scaled by manually optimized NLC factor
edbp: static D-filter, tap-by-tap optimizable/trainable N-filter
gdbp: tap-by-tap optimizable/trainable D-filter and N-filter
'''
mode = kwargs.get('mode', 'train')
steps = kwargs.get('steps', 3)
dtaps = kwargs.get('dtaps', 261)
ntaps = kwargs.get('ntaps', 41)
rtaps = kwargs.get('rtaps', 61)
xi = kwargs.get('xi', 1.1) # optimal xi for FDBP
fdbp_init = partial(gb.fdbp_init, data.a, steps=steps)
model_init = partial(gb.model_init, data)
comm_conf = {'mode': mode, 'steps': steps, 'dtaps': dtaps, 'rtaps': rtaps} # common configurations
# init. func.| define model structure parameters and some initial values | define static modules | identifier
cdc = model_init({**comm_conf, 'ntaps': 1, 'init_fn': fdbp_init(xi=0.0)}, [('fdbp_0',)], name='CDC')
dbp = model_init({**comm_conf, 'ntaps': 1, 'init_fn': fdbp_init(xi=0.15)}, [('fdbp_0',)], name='DBP')
fdbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [('fdbp_0',)], name='FDBP')
edbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [('fdbp_0', r'DConv_\d')], name='EDBP')
gdbp = model_init({**comm_conf, 'ntaps': ntaps, 'init_fn': fdbp_init(xi=xi)}, [], name='GDBP')
return cdc, dbp, fdbp, edbp, gdbp
def load_data(ch: int):
''' build pairs of datasets for training and testing '''
ds_train = gdat.load(1, LP, ch, 2)
ds_test = gdat.load(2, LP, ch, 1)
return [(ds_tr, ds_te) for ds_tr, ds_te in zip(ds_train, ds_test)]
def sweep_channel(ch: int,
df_test_res=df_test_res,
use_pretrained_params=False,
save_params=False,
save_subdirname='regular_taps',
model_init_kwargs={}):
''' sweep launched power of given channel '''
util.clear_xla_cache() # release JAX's cache to save RAM
# iterate data of target channel
for i, chds in enumerate(tqdm(load_data(ch),
desc=f'sweeping launch power on Ch#{ch}',
leave=False)):
ds_train, ds_test = chds
models_train = init_models(ds_train, **model_init_kwargs)
models_test = init_models(ds_test, mode='test', **model_init_kwargs)
# iterate models
for j, (model_train, model_test) in enumerate(tqdm(zip(models_train, models_test),
desc='iterating models',
leave=False,
total=len(models_train))):
params_file = os.path.join(loc_trained_params,
'snr_vs_lp',
save_subdirname,
'params_%d_%d_%d' % (ch, i, j)) # params_{ch}_{lp}_{mod}
if use_pretrained_params:
params = util.load_variable(params_file)
else:
# use trained params of the 3rd last batch, as tailing samples are corrupted by CD
params_queue = [None] * 3
# iterate the training steps
for _, p, _ in gb.train(model_train, ds_train, n_iter=2000):
params_queue.append(p)
params = params_queue.pop(0)
if save_params:
util.save_variable(params, params_file)
test_Q = gb.test(model_test, params, ds_test)[0].QSq.total
# collect result
df_test_res = df_test_res.append({'ChInd': ch,
'LPdBm': ds_test.a['lpdbm'],
'Model': model_test.name,
'Q': test_Q},
ignore_index=True)
return df_test_res
# it may take a while to finish
kwargs = {'save_subdirname': 'regular_taps',
'use_pretrained_params': True, # use pretrained parameters to save time
'save_params': False} # save trained parameters after training
for ch in tqdm(1 + np.arange(7), desc='sweeping channels'):
df_test_res = sweep_channel(ch, df_test_res, **kwargs)
df_test_res
# save results
df_test_res.to_csv('benchmark_regular_taps.csv', index=False)
grp_ch = df_test_res.loc[df_test_res['ChInd'].isin([1, 4])].groupby('ChInd')
for n_ch, g_ch in grp_ch:
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
for n_mod, g_mod in g_ch.groupby('Model', sort=False):
ax.plot(g_mod.LPdBm, g_mod.Q, '-o', label=n_mod)
ax.legend(loc='upper left')
ax.set_title(f'Ch#{n_ch}')
ax.set_xlabel('Launched Power (dBm)')
    ax.set_ylabel('Q-factor (dB)')
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
fmt = ['o', '^', '<', 's', '*']
grp_opt_Q = df_test_res.groupby(['ChInd', 'Model'], as_index=False, sort=False)['Q'] \
.max().groupby('Model', sort=False)
for f, (n, g) in zip(fmt, grp_opt_Q):
ax.plot(g.ChInd, g.Q, f, label=n)
ax.legend()
ax.set_xlabel('channel index')
ax.set_ylabel('Q-factor (dB)')
ax.set_ylim([8.15, 9.5])
ax.set_title(r'Optimal $Q$-factor')
# it may take a while to finish
kwargs = {'save_subdirname': 'few_taps',
'use_pretrained_params': True, # use pretrained parameters to save time
'save_params': False,
'model_init_kwargs': {'dtaps': 221, 'ntaps': 11, 'xi': 0.5}}
df_test_res_ft = sweep_channel(4, df_test_res, **kwargs)
df_test_res_ft
fig, ax = plt.subplots(figsize=(5, 3), dpi=300)
for n_mod, g_mod in df_test_res_ft.groupby('Model', sort=False):
ax.plot(g_mod.LPdBm, g_mod.Q, '-o', label=n_mod)
ax.legend(loc='upper left')
ax.set_title(f'Ch4 (fewer taps)')
ax.set_xlabel('Launched Power (dBm)')
ax.set_ylabel('Q-factor (dB)')
| 0.695028 | 0.604253 |
# Demo of the robustness score
Sources:
1. [Summary on the robustness of interpretability methods [Medium]](https://medium.com/@mohamadt/summary-on-the-robustness-of-interpretability-methods-ffb26ba22776)
2. [On the Robustness of Interpretability Methods [Arxiv]](https://arxiv.org/pdf/1806.08049.pdf)
```
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from lipschitz_metric import (lipschitz_metric,
get_perturbations,
explainer_func_wrapper)
```
## Combining LIME and Lipschitz robustness metric for classification
Let's import a dataset, train a simple classifier and use LIME to explain the model. Then, let's use the Lipschitz robustness score to check the stability of the explanations. To better illustrate the use case of the Lipschitz metric, notice that we set `tol=0.1`. This is a much higher tolerance than the default, which stops training earlier and in turn leads to a less reliable model.
```
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from lime import lime_tabular
iris = load_iris(as_frame=True)
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
model = SVC(probability=True, tol=0.1) # <-- NB! tol=0.1
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred, target_names=iris.target_names))
lime_explainer = lime_tabular.LimeTabularExplainer(X_train.values, feature_names=iris.feature_names, class_names=list(iris.target_names), discretize_continuous=False, sample_around_instance=True)
x0 = X_test.iloc[0].values
exp = lime_explainer.explain_instance(x0, model.predict_proba)
print(f"Input: {x0}")
print(f"Explanations: {exp.as_list()}")
exp.as_pyplot_figure();
```
### Calculating the Lipschitz robustness metric for one instance
We can use the Lipschitz metric as a measure of the stability of the explanations given by LIME. To do this, we create a function that performs three steps:
1. Wrap the explainer method of LIME so that it takes the input to the ML-model and outputs the feature contributions
2. Calculate some small, random perturbations away from the input and the corresponding LIME explanations
3. Use the perturbations to calculate the L-metric
```
def get_L_metric(x0, input_space, x_range=None, num_perturbations=100):
# Create a function that accepts the ML-model input and outputs the explainers feature contributions
explainer_func = explainer_func_wrapper(
predict_fn=model.predict_proba,
explainer_handle=lime_explainer,
explainer='lime'
)
# Calculate perturbations and the corresponding explanations
x0, exp0, x_perturbations, exp_perturbations = get_perturbations(
func=explainer_func,
x0=x0,
input_space=input_space,
num_perturbations=num_perturbations
)
# Calculate the L-metric (and return the input that caused this L-metric)
L, x_worst_case = lipschitz_metric(x0, exp0, x_perturbations, exp_perturbations, x_range=x_range)
exp_worst_case = explainer_func(x_worst_case)
return L, x0, exp0, x_worst_case, exp_worst_case
input_space = [(x_min, x_max) for x_min, x_max in zip(X_train.min(), X_train.max())]
x0 = X_test.iloc[0].values
L, x0, exp0, x_worst_case, exp_worst_case = get_L_metric(x0=x0, input_space=input_space, x_range=input_space)
print(f"Robustness score: {L:.2f}\n")
print(" Input Output")
print(f"Original: {str(x0):>60} --> {exp0}")
print(f"Worst-case: {str(x_worst_case):>60} --> {exp_worst_case}\n")
print(f"Diff: {str(x_worst_case-x0):>60} --> {exp_worst_case-exp0}")
```
### Calculating the Lipschitz robustness metric for the whole test set
The example above calculated the Lipschitz metric for *one* instance. Let us now do the same thing for the whole test set. This will reveal where LIME has unstable explanations.
(One could also sample the input space and calculate the Lipschitz metric for a larger area.)
```
input_space = [(x_min, x_max) for x_min, x_max in zip(X_train.min(), X_train.max())]
L_metric = []
x_worst_cases = {col: [] for col in X_test.columns}
for col, row in tqdm(X_test.iterrows(), total=X_test.shape[0]):
x0 = row.values
L, x0, exp0, x_worst_case, exp_worst_case = get_L_metric(x0=x0, input_space=input_space, x_range=input_space, num_perturbations=500)
L_metric.append(L)
for i, col in enumerate(x_worst_cases):
x_worst_cases[col].append(x_worst_case[i])
X_test_analysis = X_test.copy()
X_test_analysis['L-metric'] = L_metric
for col, values in x_worst_cases.items():
X_test_analysis[f'worst_case_{col}'] = values
X_test_analysis.head()
X_test_analysis['L-metric'].sort_values(ascending=False).plot.bar(figsize=(15,5), title='L-metric for explanations from X_test');
# Look closer at the point with the highest L-metric
worst_idx = X_test_analysis['L-metric'].argmax()
x0 = X_test_analysis.iloc[worst_idx, :4].values
x_perturbed = X_test_analysis.iloc[worst_idx, -4:].values
# Plot feature contribution for x0
lime_explainer.explain_instance(x0, model.predict_proba).as_pyplot_figure();
plt.title('Explanations for original input')
print(f"Input: {x0}")
# Plot feature contribution for perturbation
lime_explainer.explain_instance(x_perturbed, model.predict_proba).as_pyplot_figure();
plt.title('Explanations for small perturbation from input')
print(f"Perturbation: {x_perturbed}")
```
As seen above, the test set contains multiple points where the LIME explanations are unstable. When looking closer at the most unstable point, we see that the feature contributions change dramatically, even though the input has barely changed.
## Educational example
Implement the metric describing the robustness of an explanation model in terms of its constant L in the definition of “local Lipschitz continuity”. For the example, implement using a simple predict method.
**Definition 2.1 (from [2]):**
$$
f : S \subseteq \mathbb{R}^n \rightarrow \mathbb{R}^m \textrm{ is \textbf{Locally Lipschitz} if for every } x_0 \textrm{ there exist } \delta > 0 \textrm{ and } L \in \mathbb{R} \textrm{ such that } \|x - x_0\| < \delta \textrm{ implies } \|f(x) - f(x_0)\| < L\|x - x_0\|
$$
In essence, this formula says that for all perturbations $x$ around a given point $x_0$, a function is Locally Lipschitz if there exists some number $L$ that ensures that the difference in output $\|f(x) - f(x_0)\|$ is less than $L$ multiplied by the difference in input $\|x - x_0\|$.
The paper argues that if we find the minimum value for $L$ that satisfies this condition, we can use that as a measure for stability.
Originally, the $L$ was meant to be calculated analytically. But since our ML-model cannot be expressed as an end-to-end differentiable function, the authors of [2] suggests using the following formula for approximating $L$:
$$
\tilde{L}_X(x_i) = \underset{x_j \in \mathcal{N}_\epsilon(x_i)}{\mathrm{argmax}} \frac{\|f(x_i)-f(x_j)\|_2}{\|x_i-x_j\|_2}
$$
Where:
$$
\mathcal{N}_\epsilon(x_i) = \{ x_j \in X | \|x_i-x_j\| \le \epsilon \}
$$
The two formulas above basically mean that the stability metric $\tilde{L}_X(x_i)$ is found by taking the largest ratio between the change in output and the change in input, where the inputs are perturbations $x_j$ drawn from a finite sample of the neighbourhood around $x_i$, such that the size of each perturbation is less than some small, "human-decided" number $\epsilon$.
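As a self-contained sketch of this finite-sample approximation (independent of the `lipschitz_metric` helper imported above; the function name and defaults here are illustrative):
```
def lipschitz_estimate(f, x0, epsilon=0.1, n_samples=1000, seed=0):
    """Approximate L at x0 by sampling perturbations in a small box around x0 (a sketch)."""
    rng = np.random.default_rng(seed)
    x0 = np.asarray(x0, dtype=float)
    # finite sample of the neighbourhood N_eps(x0)
    xs = x0 + rng.uniform(-epsilon, epsilon, size=(n_samples, x0.size))
    ratios = np.linalg.norm(f(xs) - f(x0[None, :]), axis=1) / np.linalg.norm(xs - x0, axis=1)
    return ratios.max()

# With the toy predict() defined further below, lipschitz_estimate(predict, [0, 1, 0]) comes out close to 10.
```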
### An example
Testing the robustness score
Our function consists of two mathematical functions:
$$
\begin{align}
f_1(x) &= 3x_1 \\
f_2(x) &= 5x_2^2 + x_3
\end{align}
$$
The derivatives of these functions give us
$$
\begin{align}
\nabla f_1(x) &= \begin{bmatrix}
3 \\
0 \\
0
\end{bmatrix} \\
\nabla f_2(x) &= \begin{bmatrix}
0 \\
10x_2 \\
1
\end{bmatrix} \\
\end{align}
$$
Let's test two points, $(0,0,0)$ and $(0,1,0)$.
The point $(0,0,0)$ gives us the following derivatives:
$$
\begin{align}
\nabla f_1(0,0,0) &= \begin{bmatrix}
3 \\
0 \\
0
\end{bmatrix} \\
\nabla f_2(0,0,0) &= \begin{bmatrix}
0 \\
0 \\
1
\end{bmatrix} \\
\end{align}
$$
While the point $(0,1,0)$ gives us the following derivatives:
$$
\begin{align}
\nabla f_1(0,1,0) &= \begin{bmatrix}
3 \\
0 \\
0
\end{bmatrix} \\
\nabla f_2(0,1,0) &= \begin{bmatrix}
0 \\
10 \\
1
\end{bmatrix} \\
\end{align}
$$
The Lipschitz robustness score tries to find the largest derivative in any dimension for a given point.
In an analytical sense, this corresponds to `max(max(f1'(x*)), max(f2'(x*)))` for a given point $x^*$.
We should therefore expect the following results when testing on our function, as $N \rightarrow \infty$:
```
lipschitz_robustness_score([0,0,0]) = 3
lipschitz_robustness_score([0,1,0]) = 10
```
```
predict = lambda x: np.array([3*x[:,0],
5*x[:,1]**2 + x[:,2]]).T
```
## Implementations
### Test using random samples
```
input_space = [(0, 100), (0, 3.14), (-5, 5)]
num_perturbations=100_000
x0s = np.array([[0,0,0], [0,1,0]])
for x0 in x0s:
x0 = x0.reshape(1,-1)
x0, y0, x_perturbations, y_perturbations = get_perturbations(predict, x0, input_space, num_perturbations=num_perturbations)
L, L_input = lipschitz_metric(x0, y0, x_perturbations, y_perturbations)
print(f"For x0={x0}, the robustness metric is {L:.2f} after {num_perturbations} perturbations, where the biggest gradient was found going from x0 to {L_input}")
```
### Test using pre-sampled perturbations
If the `predict()`-method is expensive to run, narrowing down the sample space in a smart way can significantly reduce the execution time. Rather than sampling the input space randomly, one could either create a grid search or design a more deterministic approach, based on knowledge about the data, that only applies perturbations with meaningful and desirable directions and magnitudes.
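For instance, a dense grid of small steps could be generated with `itertools.product` (a quick sketch; the cell below instead hand-picks a few directions):
```
import itertools

steps = [-0.1, 0.0, 0.1]
grid_perturbations = np.array([p for p in itertools.product(steps, repeat=3) if any(p)])
print(grid_perturbations.shape)  # (26, 3): every combination of steps except the all-zero perturbation
```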
```
input_perturbations = np.array([
[ 0.1, 0, 0],
[ -0.1, 0, 0],
[ 0, 0.1, 0],
[ 0, -0.1, 0],
[ 0, 0, 0.1],
[ 0, 0, -0.1],
[ 0.01, 0.01, 0.01],
[-0.01, -0.01, -0.01],
])
x0 = np.array([[0,0,0]])
x0, y0, x_perturbations, y_perturbations = get_perturbations(predict, x0, input_perturbations=input_perturbations)
L, L_input = lipschitz_metric(x0, y0, x_perturbations, y_perturbations)
print(f"For x0={x0}, the robustness metric is {L:.2f}, where the biggest gradient was found going from x0 to {L_input}")
```
|
github_jupyter
|
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from lipschitz_metric import (lipschitz_metric,
get_perturbations,
explainer_func_wrapper)
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from lime import lime_tabular
iris = load_iris(as_frame=True)
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
model = SVC(probability=True, tol=0.1) # <-- NB! tol=0.1
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred, target_names=iris.target_names))
lime_explainer = lime_tabular.LimeTabularExplainer(X_train.values, feature_names=iris.feature_names, class_names=list(iris.target_names), discretize_continuous=False, sample_around_instance=True)
x0 = X_test.iloc[0].values
exp = lime_explainer.explain_instance(x0, model.predict_proba)
print(f"Input: {x0}")
print(f"Explanations: {exp.as_list()}")
exp.as_pyplot_figure();
def get_L_metric(x0, input_space, x_range=None, num_perturbations=100):
# Create a function that accepts the ML-model input and outputs the explainers feature contributions
explainer_func = explainer_func_wrapper(
predict_fn=model.predict_proba,
explainer_handle=lime_explainer,
explainer='lime'
)
# Calculate perturbations and the corresponding explanations
x0, exp0, x_perturbations, exp_perturbations = get_perturbations(
func=explainer_func,
x0=x0,
input_space=input_space,
num_perturbations=num_perturbations
)
# Calculate the L-metric (and return the input that caused this L-metric)
L, x_worst_case = lipschitz_metric(x0, exp0, x_perturbations, exp_perturbations, x_range=x_range)
exp_worst_case = explainer_func(x_worst_case)
return L, x0, exp0, x_worst_case, exp_worst_case
input_space = [(x_min, x_max) for x_min, x_max in zip(X_train.min(), X_train.max())]
x0 = X_test.iloc[0].values
L, x0, exp0, x_worst_case, exp_worst_case = get_L_metric(x0=x0, input_space=input_space, x_range=input_space)
print(f"Robustness score: {L:.2f}\n")
print(" Input Output")
print(f"Original: {str(x0):>60} --> {exp0}")
print(f"Worst-case: {str(x_worst_case):>60} --> {exp_worst_case}\n")
print(f"Diff: {str(x_worst_case-x0):>60} --> {exp_worst_case-exp0}")
input_space = [(x_min, x_max) for x_min, x_max in zip(X_train.min(), X_train.max())]
L_metric = []
x_worst_cases = {col: [] for col in X_test.columns}
for col, row in tqdm(X_test.iterrows(), total=X_test.shape[0]):
x0 = row.values
L, x0, exp0, x_worst_case, exp_worst_case = get_L_metric(x0=x0, input_space=input_space, x_range=input_space, num_perturbations=500)
L_metric.append(L)
for i, col in enumerate(x_worst_cases):
x_worst_cases[col].append(x_worst_case[i])
X_test_analysis = X_test.copy()
X_test_analysis['L-metric'] = L_metric
for col, values in x_worst_cases.items():
X_test_analysis[f'worst_case_{col}'] = values
X_test_analysis.head()
X_test_analysis['L-metric'].sort_values(ascending=False).plot.bar(figsize=(15,5), title='L-metric for explanations from X_test');
# Look closer at the point with the highest L-metric
worst_idx = X_test_analysis['L-metric'].argmax()
x0 = X_test_analysis.iloc[worst_idx, :4].values
x_perturbed = X_test_analysis.iloc[worst_idx, -4:].values
# Plot feature contribution for x0
lime_explainer.explain_instance(x0, model.predict_proba).as_pyplot_figure();
plt.title('Explanations for original input')
print(f"Input: {x0}")
# Plot feature contribution for perturbation
lime_explainer.explain_instance(x_perturbed, model.predict_proba).as_pyplot_figure();
plt.title('Explanations for small perturbation from input')
print(f"Perturbation: {x_perturbed}")
lipschitz_robustness_score([0,0,0]) = 3
lipschitz_robustness_score([0,1,0]) = 10
predict = lambda x: np.array([3*x[:,0],
5*x[:,1]**2 + x[:,2]]).T
input_space = [(0, 100), (0, 3.14), (-5, 5)]
num_perturbations=100_000
x0s = np.array([[0,0,0], [0,1,0]])
for x0 in x0s:
x0 = x0.reshape(1,-1)
x0, y0, x_perturbations, y_perturbations = get_perturbations(predict, x0, input_space, num_perturbations=num_perturbations)
L, L_input = lipschitz_metric(x0, y0, x_perturbations, y_perturbations)
print(f"For x0={x0}, the robustness metric is {L:.2f} after {num_perturbations} perturbations, where the biggest gradient was found going from x0 to {L_input}")
input_perturbations = np.array([
[ 0.1, 0, 0],
[ -0.1, 0, 0],
[ 0, 0.1, 0],
[ 0, -0.1, 0],
[ 0, 0, 0.1],
[ 0, 0, -0.1],
[ 0.01, 0.01, 0.01],
[-0.01, -0.01, -0.01],
])
x0 = np.array([[0,0,0]])
x0, y0, x_perturbations, y_perturbations = get_perturbations(predict, x0, input_perturbations=input_perturbations)
L, L_input = lipschitz_metric(x0, y0, x_perturbations, y_perturbations)
print(f"For x0={x0}, the robustness metric is {L:.2f}, where the biggest gradient was found going from x0 to {L_input}")
| 0.793546 | 0.972831 |
```
!date
```
# Matrix Generation Clean Up
```
import anndata
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.patches as mpatches
import scanpy as scanp
from scipy.stats import ks_2samp, ttest_ind
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from umap import UMAP
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import NeighborhoodComponentsAnalysis
from matplotlib import cm
import warnings
warnings.filterwarnings('ignore')
fsize=20
plt.rcParams.update({'font.size': fsize})
%config InlineBackend.figure_format = 'retina'
txn = pd.read_csv("../../data/SMARTseq/out_gencode/transcripts_fixed.txt", sep="\t", header=None)
adata = "../../data/SMARTseq/out_gencode/adata.h5ad"
adata = anndata.read_h5ad(adata)
adata.var["transcript_id"] = txn[0].apply(lambda x: x.split(".")[0]).values
adata.var["transcript_name"] = txn[4].values
adata.var["gene_id"] = txn[1].apply(lambda x: x.split(".")[0]).values
adata.var["gene_name"] = txn[5].values
adata.var["length"] = txn[6].values
adata.var["gene_name"] = adata.var["gene_name"].astype(str) + "_" + adata.var.gene_id.astype(str)
adata.var["transcript_name"] = adata.var["transcript_name"].astype(str) + "_" + adata.var.transcript_id.astype(str)
def change(x):
if x=="L5 ET": return "L5 PT"
return x
raw = adata.X.todense()
scaled = raw/adata.var.length.values
adata.layers["X"] = csr_matrix(scaled)
adata.X = csr_matrix(scaled)
adata.layers["norm"] = normalize(adata.X, norm='l1', axis=1)*1000000
adata.layers["norm"][0].sum()
adata.layers["log1p"] = np.log1p(adata.layers["norm"])
adata.layers["norm"][0].sum()
adata.X = adata.layers["norm"]
adata.layers["norm"][0].sum()
adata.layers["norm"][0].sum()
del raw
del scaled
adata.layers["norm"][0].sum()
def group_mtx(mtx, components, features, s2t, source_id="transcript_id", target_id="gene_id", by="features"):
"""
mtx: ndarray components by features
components: labels for rows of mtx
features: labels for columns of mtx
s2t: pandas dataframe mapping source (features or components) to a
targets features(components) to group by
target_id: column name in s2t to group by
"""
if target_id not in s2t.columns: return -1
ncomp = components.shape[0]
nfeat = features.shape[0]
ntarget = s2t[target_id].nunique()
if by =="features":
source = features
elif by =="components":
source = components
# Map the source to an index
source2idx = dict(zip(source, range(len(source))))
# Map the target to a list of source indices
target2idx = (s2t.groupby(target_id)[source_id].apply(lambda x: [source2idx[i] for i in x])).to_dict()
# array of unique targets
unique = s2t[target_id].unique().astype(str)
nuniq = unique.shape[0]
X = np.zeros((ncomp, nuniq))
for tidx, t in enumerate(unique):
# Grab the matrix indices corresponding to columns and source columns to group by
source_indices = target2idx[t]
# breaks generality
        sub_mtx = mtx[:, source_indices].sum(axis=1) # Sum on source indices
X[:,tidx] = np.asarray(sub_mtx)[:,0] # place summed vector in new matrix
# Return matrix that is grouped by
return (X, components, unique)
def filter_mtx(mtx, components, features, **kwargs):
row_counts = kwargs.get("row_counts", 0) # threshold for min counts for rows
col_counts = kwargs.get("col_counts", 0)
row_zeros = kwargs.get("row_zeros", 0) # threshold min number of non_zero entries in rows
col_zeros = kwargs.get("col_zeros", 0)
return_mask = kwargs.get("return_mask", False)
row_sum = np.asarray(mtx.sum(axis=1)).reshape(-1) # sum along the rows
col_sum = np.asarray(mtx.sum(axis=0)).reshape(-1)
mtx_zero_mask = mtx>0
row_nz = np.asarray(mtx_zero_mask.sum(axis=1)).reshape(-1)
col_nz = np.asarray(mtx_zero_mask.sum(axis=0)).reshape(-1)
# Generate masks
rs_mask = row_sum > row_counts
cs_mask = col_sum > col_counts
rz_mask = row_nz > row_zeros
cz_mask = col_nz > col_zeros
row_mask = np.logical_and(rs_mask, rz_mask)
col_mask = np.logical_and(cs_mask, cz_mask)
if return_mask:
return (row_mask, col_mask)
X = mtx[row_mask,:][:,col_mask]
c = components[row_mask]
f = features[col_mask]
return (X, c, f)
%%time
mtx = np.array([[1,1,0],
[0,1,0],
[3,0,0],
[0,2,0]])
components = np.array([1,2,3,4])
features = np.array([1, 2, 3])
X, c, f = filter_mtx(mtx, components, features, row_zeros=1, col_zeros=3)
rm, cmask = filter_mtx(mtx, components, features, return_mask=True)
cmask
X
X==mtx
```
# Group isoforms into genes, and filter.
Go back and filter on isoforms and apply it to genes.
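To sanity-check the grouping step before running it on the full matrix, here is a small toy example analogous to the `filter_mtx` check above (all names here are invented for illustration):
```
# toy: three transcripts mapping to two genes, two cells
toy_s2t = pd.DataFrame({"transcript_id": ["t1", "t2", "t3"],
                        "gene_id":       ["gA", "gA", "gB"]})
toy_mtx = np.asmatrix([[1., 2., 3.],
                       [4., 5., 6.]])  # np.asmatrix mirrors adata.layers["X"].todense()
toy_X, toy_cells, toy_genes = group_mtx(toy_mtx,
                                        components=np.array(["c1", "c2"]),
                                        features=np.array(["t1", "t2", "t3"]),
                                        s2t=toy_s2t)
toy_X  # expected: [[3., 3.], [9., 6.]] -- t1+t2 summed into gA, t3 into gB
```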
```
adata = adata
mtx = adata.layers["X"]
components = adata.obs.cell_id.values
features = adata.var.transcript_id.values
adata
%%time
mtx = adata.layers["X"].todense()
components = adata.obs.cell_id.values
features = adata.var.transcript_id.values
source_id = "transcript_id"
target_id = "gene_id"
s2t = adata.var
# Data for gene matrix
X, c, f = group_mtx(mtx, components, features, s2t)
adata
# generate isoform based on gene mask.
isoform = adata[:, adata.var.gene_id.isin(f)]
# generate gene
tmp = adata.var.drop_duplicates(["gene_id", "gene_name"])
tmp = tmp[tmp.gene_id.isin(f)]
gene = anndata.AnnData(X=X, obs=adata.obs, var=tmp)
print(isoform)
print(gene)
gene.var.index = gene.var.gene_name.values
isoform.var.index = isoform.var.transcript_name.values
```
# Begin Check
```
# the gene_id is OK, need to fix the gene name to reflect the fact that
# the same gene_name is used with multiple gene_ids
adata.var.gene_id.nunique() == gene.var.gene_name.nunique()
adata.var.transcript_id.nunique() == isoform.var.transcript_name.nunique()
gene.X = csr_matrix(gene.X)
gene.layers["X"] = gene.X.copy() # here, X is rho, the number of molecules
isoform.layers["X"] = isoform.X.copy() # here X is rho, the number of molecules
```
# Save matrix
```
gene.write_h5ad("../../data/notebook/revision/gencode_gene.h5ad")
isoform.write_h5ad("../../data/notebook/revision/gencode_isoform.h5ad")
```
|
github_jupyter
|
!date
import anndata
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.patches as mpatches
import scanpy as scanp
from scipy.stats import ks_2samp, ttest_ind
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from umap import UMAP
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import NeighborhoodComponentsAnalysis
from matplotlib import cm
import warnings
warnings.filterwarnings('ignore')
fsize=20
plt.rcParams.update({'font.size': fsize})
%config InlineBackend.figure_format = 'retina'
txn = pd.read_csv("../../data/SMARTseq/out_gencode/transcripts_fixed.txt", sep="\t", header=None)
adata = "../../data/SMARTseq/out_gencode/adata.h5ad"
adata = anndata.read_h5ad(adata)
adata.var["transcript_id"] = txn[0].apply(lambda x: x.split(".")[0]).values
adata.var["transcript_name"] = txn[4].values
adata.var["gene_id"] = txn[1].apply(lambda x: x.split(".")[0]).values
adata.var["gene_name"] = txn[5].values
adata.var["length"] = txn[6].values
adata.var["gene_name"] = adata.var["gene_name"].astype(str) + "_" + adata.var.gene_id.astype(str)
adata.var["transcript_name"] = adata.var["transcript_name"].astype(str) + "_" + adata.var.transcript_id.astype(str)
def change(x):
if x=="L5 ET": return "L5 PT"
return x
raw = adata.X.todense()
scaled = raw/adata.var.length.values
adata.layers["X"] = csr_matrix(scaled)
adata.X = csr_matrix(scaled)
adata.layers["norm"] = normalize(adata.X, norm='l1', axis=1)*1000000
adata.layers["norm"][0].sum()
adata.layers["log1p"] = np.log1p(adata.layers["norm"])
adata.layers["norm"][0].sum()
adata.X = adata.layers["norm"]
adata.layers["norm"][0].sum()
adata.layers["norm"][0].sum()
del raw
del scaled
adata.layers["norm"][0].sum()
def group_mtx(mtx, components, features, s2t, source_id="transcript_id", target_id="gene_id", by="features"):
"""
mtx: ndarray components by features
components: labels for rows of mtx
features: labels for columns of mtx
s2t: pandas dataframe mapping source (features or components) to a
targets features(components) to group by
target_id: column name in s2t to group by
"""
if target_id not in s2t.columns: return -1
ncomp = components.shape[0]
nfeat = features.shape[0]
ntarget = s2t[target_id].nunique()
if by =="features":
source = features
elif by =="components":
source = components
# Map the source to an index
source2idx = dict(zip(source, range(len(source))))
# Map the target to a list of source indices
target2idx = (s2t.groupby(target_id)[source_id].apply(lambda x: [source2idx[i] for i in x])).to_dict()
# array of unique targets
unique = s2t[target_id].unique().astype(str)
nuniq = unique.shape[0]
X = np.zeros((ncomp, nuniq))
for tidx, t in enumerate(unique):
# Grab the matrix indices corresponding to columns and source columns to group by
source_indices = target2idx[t]
# breaks generality
        sub_mtx = mtx[:, source_indices].sum(axis=1) # Sum on source indices
X[:,tidx] = np.asarray(sub_mtx)[:,0] # place summed vector in new matrix
# Return matrix that is grouped by
return (X, components, unique)
def filter_mtx(mtx, components, features, **kwargs):
row_counts = kwargs.get("row_counts", 0) # threshold for min counts for rows
col_counts = kwargs.get("col_counts", 0)
row_zeros = kwargs.get("row_zeros", 0) # threshold min number of non_zero entries in rows
col_zeros = kwargs.get("col_zeros", 0)
return_mask = kwargs.get("return_mask", False)
row_sum = np.asarray(mtx.sum(axis=1)).reshape(-1) # sum along the rows
col_sum = np.asarray(mtx.sum(axis=0)).reshape(-1)
mtx_zero_mask = mtx>0
row_nz = np.asarray(mtx_zero_mask.sum(axis=1)).reshape(-1)
col_nz = np.asarray(mtx_zero_mask.sum(axis=0)).reshape(-1)
# Generate masks
rs_mask = row_sum > row_counts
cs_mask = col_sum > col_counts
rz_mask = row_nz > row_zeros
cz_mask = col_nz > col_zeros
row_mask = np.logical_and(rs_mask, rz_mask)
col_mask = np.logical_and(cs_mask, cz_mask)
if return_mask:
return (row_mask, col_mask)
X = mtx[row_mask,:][:,col_mask]
c = components[row_mask]
f = features[col_mask]
return (X, c, f)
%%time
mtx = np.array([[1,1,0],
[0,1,0],
[3,0,0],
[0,2,0]])
components = np.array([1,2,3,4])
features = np.array([1, 2, 3])
X, c, f = filter_mtx(mtx, components, features, row_zeros=1, col_zeros=3)
rm, cmask = filter_mtx(mtx, components, features, return_mask=True)
cmask
X
X==mtx
adata = adata
mtx = adata.layers["X"]
components = adata.obs.cell_id.values
features = adata.var.transcript_id.values
adata
%%time
mtx = adata.layers["X"].todense()
components = adata.obs.cell_id.values
features = adata.var.transcript_id.values
source_id = "transcript_id"
target_id = "gene_id"
s2t = adata.var
# Data for gene matrix
X, c, f = group_mtx(mtx, components, features, s2t)
adata
# generate isoform based on gene mask.
isoform = adata[:, adata.var.gene_id.isin(f)]
# generate gene
tmp = adata.var.drop_duplicates(["gene_id", "gene_name"])
tmp = tmp[tmp.gene_id.isin(f)]
gene = anndata.AnnData(X=X, obs=adata.obs, var=tmp)
print(isoform)
print(gene)
gene.var.index = gene.var.gene_name.values
isoform.var.index = isoform.var.transcript_name.values
# the gene_id is OK, need to fix the gene name to reflect the fact that
# the same gene_name is used with multiple gene_ids
adata.var.gene_id.nunique() == gene.var.gene_name.nunique()
adata.var.transcript_id.nunique() == isoform.var.transcript_name.nunique()
gene.X = csr_matrix(gene.X)
gene.layers["X"] = gene.X.copy() # here, X is rho, the number of molecules
isoform.layers["X"] = isoform.X.copy() # here X is rho, the number of molecules
gene.write_h5ad("../../data/notebook/revision/gencode_gene.h5ad")
isoform.write_h5ad("../../data/notebook/revision/gencode_isoform.h5ad")
| 0.58948 | 0.798108 |
```
class Website:
pass
github = Website()
github
#We can say that github is "a Website instance", "a Website object" or "a Website"
class Website:
def initialize(self, url, founding_year, free_to_use):
self.url = url
self.founding_year = founding_year
self.free_to_use = free_to_use
def info(self):
print("URL:", self.url)
print("Founding year:", self.founding_year)
print("Free to use:", self.free_to_use)
github = Website()
github.initialize('https://github.com/', 2008, True)
```
In Python, there's a "magic" method that runs when we create a new Website by calling the Website class. It's called `__init__` and it does nothing by default.
```
class Website:
def __init__(self, url, founding_year, free_to_use):
self.url = url
self.founding_year = founding_year
self.free_to_use = free_to_use
def info(self):
print("URL:", self.url)
print("Founding year:", self.founding_year)
print("Free to use:", self.free_to_use)
github = Website('https://github.com/', 2008, True)
github.info()
import datetime # we will use this for date objects
class Person:
def __init__(self, name, surname, birthdate, address, telephone, email):
self.name = name
self.surname = surname
self.birthdate = birthdate
self.address = address
self.telephone = telephone
self.email = email
def age(self):
today = datetime.date.today()
age = today.year - self.birthdate.year
if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):
age -= 1
return age
person = Person(
"Jane",
"Doe",
datetime.date(1992, 3, 12), # year, month, day
"No. 12 Short Street, Greenville",
"555 456 0987",
"jane.doe@example.com"
)
print(person.name)
print(person.email)
print(person.age())
import datetime # we will use this for date objects
class Person:
def __init__(self, name, surname, birthdate, address, telephone, email):
self.name = name
self.surname = surname
self.birthdate = birthdate
self.address = address
self.telephone = telephone
self.email = email
def age(self):
if hasattr(self, "_age"):
print("yes! has attribute")
return self._age
today = datetime.date.today()
age = today.year - self.birthdate.year
if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):
age -= 1
self._age = age
return age
person = Person(
"Jane",
"Doe",
datetime.date(1994, 3, 12), # year, month, day
"No. 12 Short Street, Greenville",
"555 456 0987",
"jane.doe@example.com"
)
print(person.name)
print(person.email)
print(person.age())
```
## Regular Methods, Static Methods and Class Methods
### Regular Methods:
Regular methods take the instance as the first argument; `apply_raise(self)` below is a regular method.
### Class Methods:
Class methods take the class as the first argument. `set_raise_amt(cls, amount)` is a class method; it receives the class (`cls`) as its first argument.
### Static Methods:
The giveaway for a static method is that it does not access any instance or class variable, so there is no `self` or `cls` parameter in its signature. A minimal sketch contrasting the three kinds follows below.
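A throwaway illustration of the three kinds (class `Demo` is invented here and is separate from the `Employee` example below):
```
class Demo:
    def regular(self):            # receives the instance
        return self

    @classmethod
    def cls_method(cls):          # receives the class, even when called on an instance
        return cls

    @staticmethod
    def static_method():          # receives neither self nor cls
        return "no self, no cls"

d = Demo()
print(d.regular() is d)           # True
print(d.cls_method() is Demo)     # True
print(Demo.static_method())       # no self, no cls
```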
```
# https://raw.githubusercontent.com/CoreyMSchafer/code_snippets/master/Object-Oriented/3-Class-Static-Methods/oop.py
class Employee:
num_of_emps = 0
raise_amt = 1.04
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.email = first + '.' + last + '@email.com'
self.pay = pay
Employee.num_of_emps += 1
def fullname(self):
return '{} {}'.format(self.first, self.last)
def apply_raise(self):
self.pay = int(self.pay * self.raise_amt)
@classmethod
def set_raise_amt(cls, amount):
cls.raise_amt = amount
@classmethod
def from_string(cls, emp_str):
first, last, pay = emp_str.split('-')
return cls(first, last, pay)
@staticmethod
def is_workday(day):
if day.weekday() == 5 or day.weekday() == 6:
return False
return True
emp_1 = Employee('Corey', 'Schafer', 50000)
emp_2 = Employee('Test', 'Employee', 60000)
Employee.set_raise_amt(1.05)
print(Employee.raise_amt)
print(emp_1.raise_amt)
print(emp_2.raise_amt)
emp_str_1 = 'John-Doe-70000'
emp_str_2 = 'Steve-Smith-30000'
emp_str_3 = 'Jane-Doe-90000'
first, last, pay = emp_str_1.split('-')
#new_emp_1 = Employee(first, last, pay)
new_emp_1 = Employee.from_string(emp_str_1) # We can also say that class methods can be used as alternative constructors
print(new_emp_1.email)
print(new_emp_1.pay)
import datetime
my_date = datetime.date(2016, 7, 11)
print(Employee.is_workday(my_date))
```
|
github_jupyter
|
class Website:
pass
github = Website()
github
#We can say that github is "a Website instance", "a Website object" or "a Website"
class Website:
def initialize(self, url, founding_year, free_to_use):
self.url = url
self.founding_year = founding_year
self.free_to_use = free_to_use
def info(self):
print("URL:", self.url)
print("Founding year:", self.founding_year)
print("Free to use:", self.free_to_use)
github = Website()
github.initialize('https://github.com/', 2008, True)
class Website:
def __init__(self, url, founding_year, free_to_use):
self.url = url
self.founding_year = founding_year
self.free_to_use = free_to_use
def info(self):
print("URL:", self.url)
print("Founding year:", self.founding_year)
print("Free to use:", self.free_to_use)
github = Website('https://github.com/', 2008, True)
github.info()
import datetime # we will use this for date objects
class Person:
def __init__(self, name, surname, birthdate, address, telephone, email):
self.name = name
self.surname = surname
self.birthdate = birthdate
self.address = address
self.telephone = telephone
self.email = email
def age(self):
today = datetime.date.today()
age = today.year - self.birthdate.year
if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):
age -= 1
return age
person = Person(
"Jane",
"Doe",
datetime.date(1992, 3, 12), # year, month, day
"No. 12 Short Street, Greenville",
"555 456 0987",
"jane.doe@example.com"
)
print(person.name)
print(person.email)
print(person.age())
import datetime # we will use this for date objects
class Person:
def __init__(self, name, surname, birthdate, address, telephone, email):
self.name = name
self.surname = surname
self.birthdate = birthdate
self.address = address
self.telephone = telephone
self.email = email
def age(self):
if hasattr(self, "_age"):
print("yes! has attribute")
return self._age
today = datetime.date.today()
age = today.year - self.birthdate.year
if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day):
age -= 1
self._age = age
return age
person = Person(
"Jane",
"Doe",
datetime.date(1994, 3, 12), # year, month, day
"No. 12 Short Street, Greenville",
"555 456 0987",
"jane.doe@example.com"
)
print(person.name)
print(person.email)
print(person.age())
# https://raw.githubusercontent.com/CoreyMSchafer/code_snippets/master/Object-Oriented/3-Class-Static-Methods/oop.py
class Employee:
num_of_emps = 0
raise_amt = 1.04
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.email = first + '.' + last + '@email.com'
self.pay = pay
Employee.num_of_emps += 1
def fullname(self):
return '{} {}'.format(self.first, self.last)
def apply_raise(self):
self.pay = int(self.pay * self.raise_amt)
@classmethod
def set_raise_amt(cls, amount):
cls.raise_amt = amount
@classmethod
def from_string(cls, emp_str):
first, last, pay = emp_str.split('-')
return cls(first, last, pay)
@staticmethod
def is_workday(day):
if day.weekday() == 5 or day.weekday() == 6:
return False
return True
emp_1 = Employee('Corey', 'Schafer', 50000)
emp_2 = Employee('Test', 'Employee', 60000)
Employee.set_raise_amt(1.05)
print(Employee.raise_amt)
print(emp_1.raise_amt)
print(emp_2.raise_amt)
emp_str_1 = 'John-Doe-70000'
emp_str_2 = 'Steve-Smith-30000'
emp_str_3 = 'Jane-Doe-90000'
first, last, pay = emp_str_1.split('-')
#new_emp_1 = Employee(first, last, pay)
new_emp_1 = Employee.from_string(emp_str_1) # We can also say that class methods can be used as alternative constructors
print(new_emp_1.email)
print(new_emp_1.pay)
import datetime
my_date = datetime.date(2016, 7, 11)
print(Employee.is_workday(my_date))
| 0.471467 | 0.637242 |
# 03.01 - MODELS FROM DATA
```
!wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1.20211.udea/main/content/init.py
import init; init.init(force_download=False); init.get_weblink()
```
## 1. A model for 1D data (distributions)
**Ideal case**: We know the distributions the data come from. $\Rightarrow$ we can compute our model analytically.
- Can we find a model with 100% accuracy? Why, or why not?
```
from local.lib import mlutils
from scipy import stats
from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
## KEEPOUTPUT
d1 = stats.norm(loc=10,scale=2)
d2 = stats.norm(loc=17,scale=3)
x = np.linspace(0,30,100)
plt.plot(x, d1.pdf(x), color="red", label="pop 1")
plt.plot(x, d2.pdf(x), color="blue", label="pop 2")
plt.grid()
plt.legend();
## KEEPOUTPUT
x = np.linspace(5,25,1000)
minx = x[np.argmin(np.abs(d1.pdf(x)-d2.pdf(x)))]
print("frontera óptima en %.2f"%minx)
x = np.linspace(0,30,100)
plt.plot(x, d1.pdf(x), color="red", label="pop 1")
plt.plot(x, d2.pdf(x), color="blue", label="pop 2")
plt.axvline(minx, color="black", label="óptimo = %.2f"%minx)
plt.grid()
plt.legend();
```
#### analytic computation of the classification errors
```
## KEEPOUTPUT
print("pop 1 error", 1-d1.cdf(minx))
print("pop 2 error", d2.cdf(minx))
```
**Real case**: We only have a sample of the data
$\rightarrow$ How do we decide where to place the boundary?
$\rightarrow$ What boundary would we like to obtain?
```
## KEEPOUTPUT
from local.lib import mlutils
mlutils.show_1D_dataset_samples(100, d1, d2, n_datasets=1, dot_alpha=.5, line_alpha=0, figsize=(20,1))
plt.axis("on"); plt.ylim(.095, .105); plt.yticks([]);
plt.axhline(.1, color="black", alpha=.2)
## KEEPOUTPUT
mlutils.show_1D_dataset_samples(10, d1, d2, dot_alpha=.3)
## KEEPOUTPUT
mlutils.show_1D_dataset_samples(100, d1, d2, dot_alpha=.05)
## KEEPOUTPUT
mlutils.show_1D_dataset_samples(10000, d1, d2, dot_alpha=.001)
```
## 2. A model for 2D data (distributions)
- in 2D, a classification model **is a boundary** in the plane
- **suppose** we have access to the distributions the data come from $\rightarrow$ we can sample as many times as we want
- which boundary produces the smallest classification error?
- $\epsilon$ is the classification error computed analytically **with the optimal boundary**, because we know the distributions that generate the data.
- $\hat{\epsilon}$ is the classification error computed on the data sample, also **with the optimal boundary** (also known as the _Bayes boundary_).
```
## KEEPOUTPUT
mc = mlutils.Example_Bayes2DClassifier(mean0=[1.5, 2.5], cov0=[[0.1, 0.], [0., 0.1]],
mean1=[1.5, 2.], cov1=[[0.2,0.1],[0,.2]])
X,y = mc.sample(500)
mlutils.plot_2Ddata_with_boundary(mc.predict, X, y, line_width=3, line_color="green", dots_alpha=.3)
plt.title(" $\hat{\epsilon}=%.3f$"%(1-mc.score(X,y))+" $\epsilon=%.3f$"%(1-mc.analytic_score()));
plt.grid();
```
Try the following experiments:
- move apart the centers of the distributions of each class (`mean0`, `mean1`).
- increase and decrease the covariance matrices.
- increase and decrease the number of samples.
- observe the stability of $\hat{\epsilon}$ with respect to $\epsilon$ as the two classes become more mixed or as there are more or fewer data points.
In general **WE ONLY HAVE ONE SAMPLE** of the data, because we have no knowledge of the distributions that generate them. The methods of **statistics** and **machine learning** are designed for this situation.
Run the following cell and ask yourself each time, **what boundary would you draw given the data you see?**
Notice that these are different samples from the same underlying distribution. That is, **the reality behind these data is always the same**.
**Increase** the number of samples and ask yourself the same question each time.
```
## KEEPOUTPUT
X,y = mc.sample(300)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
plt.grid()
```
### Machine learning algorithms:
- Classification algorithms **compute boundaries** between the data.
- They start from a sample of the data, **not from the distributions**.
- Therefore, **we do not know** the shape of the Bayes boundary.
In other words, **we start out blind**!!!
To deal with this situation, **any algorithm** necessarily has to make some base assumption:
- the data come from normal distributions.
- the columns are statistically independent.
- the boundary is linear, or quadratic.
- the boundary is represented by a neural network.
With this in mind, and **starting from a sample of the data**:
- the goal of an ML practitioner is to get as close as possible to the **Bayes boundary** (whose shape we do not know).
- different ML algorithms have **different capacities** to model boundaries (a linear classifier cannot capture a quadratic boundary).
- we need two kinds of tools:
    - a good collection of ML algorithms.
    - methods to know how close we are to the Bayes boundary.
### observe how a linear classifier approximates the boundary with different sample sizes
- change the `n_samples` parameter and experiment with the following code.
- then try different estimators, e.g.
estimator = SVC(gamma=1)
estimator = RandomForestClassifier()
estimator = SVC(gamma=100)
Ask yourself the following questions:
- what level of complexity is an estimator able to capture?
```
## KEEPOUTPUT
n_samples = 50
estimator = LogisticRegression()
estimator = SVC(gamma=100)
plt.figure(figsize=(15,3))
for i in range(3):
plt.subplot(1,3,i+1)
mlutils.plot_estimator_border(mc, estimator, mins=[0,-1], maxs=[3,4], n_samples=n_samples, legend=False)
```
We now make the experiment more exhaustive by repeating the following process 20 times:
- sample the dataset (remember that, **exceptionally**, we have access to the distributions that generate the data and can therefore do this).
- compute the classification boundary obtained by the estimator using the sampled dataset.
- plot this boundary (in black).
- the **Bayes boundary** is shown in green.
```
## KEEPOUTPUT
mlutils.sample_borders(mc, estimator, samples = [10,50,100,500], n_reps=20, mins=[0,-1], maxs=[3,4])
```
## 3. We only have one dataset
- resample and retrain to measure performance and understand stability
- try `test_pct` between 0.1 and 0.9. Observe the divergence between train and test when `test_pct` is closer to 0.9. Why?
- try `SVC(gamma=100)`. In this case observe the divergence between train and test even when `test_pct` is small. Why?
- also try `dataset_size` between 20 and 500
Understand the difference between **having few data points** and **overfitting**
Interesting configurations for
mc = mlutils.Example_Bayes2DClassifier(mean0=[1.5, 1.5], cov0=[[0.5, 0.1], [0.3, 0.5]],
mean1=[1.2, 2.], cov1=[[0.2,0.1],[0,.5]])
- SVC $\gamma=100$, `dataset_size=500` (overfitting with few data points, convergence with many)
- SVC $\gamma=100$, `dataset_size=2000` (overfitting with few data points, convergence with many)
- SVC $\gamma=.01$, `dataset_size=100` (always variable, convergence on average with few data points)
- SVC $\gamma=1$, `dataset_size=100` (always variable, convergence on average with few data points)
- LinearRegression, `dataset_size=100` (never converges to the Bayes boundary)
```
## KEEPOUTPUT
dataset_size = 200
mc = mlutils.Example_Bayes2DClassifier(mean0=[1.5, 1.5], cov0=[[4., 0.5], [0.1, 4.]],
mean1=[1.5,4.], cov1=[[1,0.5],[0.1,1.]])
X,y = mc.sample(dataset_size)
mlutils.plot_estimator_border(mc, n_samples=dataset_size, legend=False)
analitic_score = mc.analytic_score()
```
We take a sample to split the data into train and test
```
## KEEPOUTPUT
test_pct = .3
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=test_pct)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
## KEEPOUTPUT
plt.figure(figsize=(10,3))
plt.subplot(121)
mlutils.plot_2Ddata(X_train, y_train, dots_alpha=.3)
plt.title("train data")
plt.grid()
plt.subplot(122)
mlutils.plot_2Ddata(X_test, y_test, dots_alpha=.3)
plt.grid()
plt.title("test data")
#estimator = SVC(gamma=1)
estimator = SVC(gamma=100)
#estimator = LogisticRegression()
#estimator = RandomForestClassifier()
estimator.fit(X_train, y_train)
## KEEPOUTPUT
print("accuracy train %.2f"%estimator.score(X_train,y_train))
tr_preds = estimator.predict(X_train)
print("predicciones para train")
print(tr_preds)
print("ground truth para train")
print(y_train)
## KEEPOUTPUT
print("\naccuracy test %.2f"%estimator.score(X_test,y_test))
ts_preds = estimator.predict(X_test)
print("predicciones para test")
print(ts_preds)
print("ground truth para test")
print(y_test)
## KEEPOUTPUT
estimator = SVC(gamma=.001)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.1)
estimator.fit(X_train, y_train)
trsc = estimator.score(X_train, y_train)
tssc = estimator.score(X_test, y_test)
print("train_score %5.2f"%estimator.score(X_train, y_train))
print("test_score %5.2f"%estimator.score(X_test, y_test))
plt.figure(figsize=(10,3))
plt.subplot(121)
mlutils.plot_2Ddata(X_train, y_train, dots_alpha=.3)
mlutils.plot_2D_boundary(estimator.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=3, line_alpha=.7, label=None)
plt.title("train accuracy %.5f"%estimator.score(X_train, y_train))
mlutils.plot_2D_boundary(mc.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=1, line_alpha=1., line_color="green", label="bayes boundary")
plt.subplot(122)
mlutils.plot_2Ddata(X_test, y_test, dots_alpha=.3)
mlutils.plot_2D_boundary(estimator.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=3, line_alpha=.7, label=None)
plt.title("test accuracy %.5f"%estimator.score(X_test, y_test))
mlutils.plot_2D_boundary(mc.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=1, line_alpha=1., line_color="green", label="bayes boundary")
```
## 4. Learning curves
We normally have datasets with many dimensions (columns) and cannot visualize the data as in 2D $\rightarrow$ we need methods to gather evidence about whether we have too few data points, whether we are overfitting, etc.
**Learning curves** help us with this.
Repeat the experiment from the previous section with different initial dataset sizes and different algorithms.
```
X,y = mc.sample(500)
## KEEPOUTPUT
estimator = LogisticRegression()
mlutils.lcurve(estimator, X, y, n_reps=20, score_func=accuracy_score)
plt.axhline(analitic_score, lw=2, color="black", label="bayes score")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim(0.7,1.0);
## KEEPOUTPUT
estimator = SVC(gamma=1)
mlutils.lcurve(estimator, X, y, n_reps=20, score_func=accuracy_score)
plt.axhline(analitic_score, lw=2, color="black", label="bayes score")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim(0.7,1.0);
## KEEPOUTPUT
estimator = SVC(gamma=100)
mlutils.lcurve(estimator, X, y, n_reps=20, score_func=accuracy_score)
plt.axhline(analitic_score, lw=2, color="black", label="bayes score")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim(0.4,1.0);
```
Don't forget that we normally **do not know the _Bayes boundary_**, and therefore we do not have the _Bayes score_.
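As a side note (not part of the original course material), scikit-learn's generic `learning_curve` helper produces the same kind of diagnostic as `mlutils.lcurve`; a minimal sketch using the `X`, `y` sampled above:
```
from sklearn.model_selection import learning_curve

sizes, tr_scores, ts_scores = learning_curve(LogisticRegression(), X, y,
                                             train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
plt.plot(sizes, tr_scores.mean(axis=1), marker="o", label="train")
plt.plot(sizes, ts_scores.mean(axis=1), marker="o", label="test")
plt.xlabel("train size"); plt.ylabel("accuracy"); plt.legend(); plt.grid();
```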
## Taxonomy of machine learning problems
- Supervised
    - Classification
    - Regression
- Unsupervised
    - Clustering
    - Density estimation
    - etc.
- Reinforcement learning
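An illustrative (and necessarily partial) mapping of this taxonomy onto scikit-learn estimators:
```
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity

LogisticRegression()   # supervised: classification
LinearRegression()     # supervised: regression
KMeans(n_clusters=3)   # unsupervised: clustering
KernelDensity()        # unsupervised: density estimation
# reinforcement learning falls outside scikit-learn's scope
```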
## Model complexity vs. data complexity
```
from IPython.display import Image
Image(filename='local/imgs/bvc.png', width=600)
```
|
github_jupyter
|
!wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1.20211.udea/main/content/init.py
import init; init.init(force_download=False); init.get_weblink()
from local.lib import mlutils
from scipy import stats
from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import warnings
warnings.filterwarnings("ignore")
%matplotlib inline
## KEEPOUTPUT
d1 = stats.norm(loc=10,scale=2)
d2 = stats.norm(loc=17,scale=3)
x = np.linspace(0,30,100)
plt.plot(x, d1.pdf(x), color="red", label="pop 1")
plt.plot(x, d2.pdf(x), color="blue", label="pop 2")
plt.grid()
plt.legend();
## KEEPOUTPUT
x = np.linspace(5,25,1000)
minx = x[np.argmin(np.abs(d1.pdf(x)-d2.pdf(x)))]
print("frontera óptima en %.2f"%minx)
x = np.linspace(0,30,100)
plt.plot(x, d1.pdf(x), color="red", label="pop 1")
plt.plot(x, d2.pdf(x), color="blue", label="pop 2")
plt.axvline(minx, color="black", label="óptimo = %.2f"%minx)
plt.grid()
plt.legend();
## KEEPOUTPUT
print("pop 1 error", 1-d1.cdf(minx))
print("pop 2 error", d2.cdf(minx))
## KEEPOUTPUT
from local.lib import mlutils
mlutils.show_1D_dataset_samples(100, d1, d2, n_datasets=1, dot_alpha=.5, line_alpha=0, figsize=(20,1))
plt.axis("on"); plt.ylim(.095, .105); plt.yticks([]);
plt.axhline(.1, color="black", alpha=.2)
## KEEPOUTPUT
mlutils.show_1D_dataset_samples(10, d1, d2, dot_alpha=.3)
## KEEPOUTPUT
mlutils.show_1D_dataset_samples(100, d1, d2, dot_alpha=.05)
## KEEPOUTPUT
mlutils.show_1D_dataset_samples(10000, d1, d2, dot_alpha=.001)
## KEEPOUTPUT
mc = mlutils.Example_Bayes2DClassifier(mean0=[1.5, 2.5], cov0=[[0.1, 0.], [0., 0.1]],
mean1=[1.5, 2.], cov1=[[0.2,0.1],[0,.2]])
X,y = mc.sample(500)
mlutils.plot_2Ddata_with_boundary(mc.predict, X, y, line_width=3, line_color="green", dots_alpha=.3)
plt.title(" $\hat{\epsilon}=%.3f$"%(1-mc.score(X,y))+" $\epsilon=%.3f$"%(1-mc.analytic_score()));
plt.grid();
## KEEPOUTPUT
X,y = mc.sample(300)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
plt.grid()
## KEEPOUTPUT
n_samples = 50
estimator = LogisticRegression()
estimator = SVC(gamma=100)
plt.figure(figsize=(15,3))
for i in range(3):
plt.subplot(1,3,i+1)
mlutils.plot_estimator_border(mc, estimator, mins=[0,-1], maxs=[3,4], n_samples=n_samples, legend=False)
## KEEPOUTPUT
mlutils.sample_borders(mc, estimator, samples = [10,50,100,500], n_reps=20, mins=[0,-1], maxs=[3,4])
## KEEPOUTPUT
dataset_size = 200
mc = mlutils.Example_Bayes2DClassifier(mean0=[1.5, 1.5], cov0=[[4., 0.5], [0.1, 4.]],
mean1=[1.5,4.], cov1=[[1,0.5],[0.1,1.]])
X,y = mc.sample(dataset_size)
mlutils.plot_estimator_border(mc, n_samples=dataset_size, legend=False)
analitic_score = mc.analytic_score()
## KEEPOUTPUT
test_pct = .3
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=test_pct)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
## KEEPOUTPUT
plt.figure(figsize=(10,3))
plt.subplot(121)
mlutils.plot_2Ddata(X_train, y_train, dots_alpha=.3)
plt.title("train data")
plt.grid()
plt.subplot(122)
mlutils.plot_2Ddata(X_test, y_test, dots_alpha=.3)
plt.grid()
plt.title("test data")
#estimator = SVC(gamma=1)
estimator = SVC(gamma=100)
#estimator = LogisticRegression()
#estimator = RandomForestClassifier()
estimator.fit(X_train, y_train)
## KEEPOUTPUT
print("accuracy train %.2f"%estimator.score(X_train,y_train))
tr_preds = estimator.predict(X_train)
print("predicciones para train")
print(tr_preds)
print("ground truth para train")
print(y_train)
## KEEPOUTPUT
print("\naccuracy test %.2f"%estimator.score(X_test,y_test))
ts_preds = estimator.predict(X_test)
print("predicciones para test")
print(ts_preds)
print("ground truth para test")
print(y_test)
## KEEPOUTPUT
estimator = SVC(gamma=.001)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.1)
estimator.fit(X_train, y_train)
trsc = estimator.score(X_train, y_train)
tssc = estimator.score(X_test, y_test)
print("train_score %5.2f"%estimator.score(X_train, y_train))
print("test_score %5.2f"%estimator.score(X_test, y_test))
plt.figure(figsize=(10,3))
plt.subplot(121)
mlutils.plot_2Ddata(X_train, y_train, dots_alpha=.3)
mlutils.plot_2D_boundary(estimator.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=3, line_alpha=.7, label=None)
plt.title("train accuracy %.5f"%estimator.score(X_train, y_train))
mlutils.plot_2D_boundary(mc.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=1, line_alpha=1., line_color="green", label="bayes boundary")
plt.subplot(122)
mlutils.plot_2Ddata(X_test, y_test, dots_alpha=.3)
mlutils.plot_2D_boundary(estimator.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=3, line_alpha=.7, label=None)
plt.title("test accuracy %.5f"%estimator.score(X_test, y_test))
mlutils.plot_2D_boundary(mc.predict, np.min(X, axis=0), np.max(X, axis=0),
line_width=1, line_alpha=1., line_color="green", label="bayes boundary")
X,y = mc.sample(500)
## KEEPOUTPUT
estimator = LogisticRegression()
mlutils.lcurve(estimator, X, y, n_reps=20, score_func=accuracy_score)
plt.axhline(analitic_score, lw=2, color="black", label="bayes score")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim(0.7,1.0);
## KEEPOUTPUT
estimator = SVC(gamma=1)
mlutils.lcurve(estimator, X, y, n_reps=20, score_func=accuracy_score)
plt.axhline(analitic_score, lw=2, color="black", label="bayes score")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim(0.7,1.0);
## KEEPOUTPUT
estimator = SVC(gamma=100)
mlutils.lcurve(estimator, X, y, n_reps=20, score_func=accuracy_score)
plt.axhline(analitic_score, lw=2, color="black", label="bayes score")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylim(0.4,1.0);
from IPython.display import Image
Image(filename='local/imgs/bvc.png', width=600)
| 0.529993 | 0.888324 |
This notebook is about Chapter 8, "Plasticity and Learning", in Dayan and Abbott. The weight updates are integrated with the Euler method (see https://rosettacode.org/wiki/Euler_method).
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import zscore
```
$v$ is the activity of the post-synaptic neuron, $u$ is the input pattern and $w$ is the synaptic weight.
$$v=w\cdot u$$
$$\tau\frac{dw}{dt}=v\cdot u$$
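For the simulations below, the continuous rule is integrated with a forward Euler step; absorbing the time constant $\tau$ into the step size $h$ (as the code does), the discrete update is

$$w_{t+1} = w_t + h\,v_t\,u_t, \qquad v_t = w_t \cdot u_t.$$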
Let's use deconvolved calcium data from real neurons (https://figshare.com/articles/Recording_of_19_000_neurons_across_mouse_visual_cortex_during_sparse_noise_stimuli/9505250) as input patterns to an artificial neuron $v$. We use the firing patterns of 1000 randomly chosen neurons as inputs.
```
dat=np.load('/media/maria/DATA1/Documents/data_for_dayan_notebook/spks.npy')
random_neurons=np.random.randint(0,18795,1000)
print(random_neurons.shape)
input_patterns=dat[random_neurons,:]
del dat
print(input_patterns.shape)
#one timestep with random gaussian weights without plasticity
weights=np.random.normal(loc=0,size=(1000,))
v=np.dot(weights,input_patterns[:,1])
print(v)
#multiple time steps with random gaussian weights without plasticity
v_lst=[]
for j in range(1,100):
v_lst.append(np.dot(weights,input_patterns[:,j]))
plt.plot(v_lst)
#Using Euler's method to calculate the weight increments
h=0.001
input_patterns=zscore(input_patterns,axis=1)
v_lst=[]
w_lst=[]
w=np.random.normal(loc=0,size=(1000,))
v_lst=[np.dot(w,input_patterns[:,1])]
for j in range(2,500):
v_lst.append(np.dot(w,input_patterns[:,j]))
w=w+h*v_lst[-1]*input_patterns[:,j]
w=np.clip(w,a_min=-100,a_max=100)
w_lst.append(w)
w_arr=np.array(w_lst).T
print(w_arr.shape)
plt.plot(w_arr[0,:])
for j in range(0,10):
plt.plot(w_arr[j,:])
print(v_lst)
plt.plot(v_lst)
#Take PCA of the weights matrix
from sklearn.decomposition import PCA
pca=PCA(n_components=2)
X=pca.fit_transform(w_arr)
print(X.shape)
plt.scatter(X[:,0],X[:,1])
```
# Inputs with Gaussian statistics
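Averaged over the input patterns, the plain Hebbian rule is driven by the input correlation matrix $Q = \langle \mathbf{u}\,\mathbf{u}^{\top}\rangle$ (a standard result for this chapter), so the weight vector tends to grow along the leading eigenvector of $Q$; this is why looking at the principal components of the weights is informative:

$$\tau\frac{d\mathbf{w}}{dt} = \langle v\,\mathbf{u}\rangle = Q\,\mathbf{w}$$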
```
mean=np.mean(input_patterns)
print(mean)
std=np.std(input_patterns)
print(std)
#Using Euler's method to calculate the weight increments
h=0.001
input_patterns=np.random.normal(0,1,(1000,500))
v_lst=[]
w_lst=[]
w=np.random.normal(loc=0,size=(1000,))
v_lst=[np.dot(w,input_patterns[:,1])]
for j in range(2,500):
v_lst.append(np.dot(w,input_patterns[:,j]))
w=w+h*v_lst[-1]*input_patterns[:,j]
w=np.clip(w,a_min=-100,a_max=100)
w_lst.append(w)
w_arr=np.array(w_lst).T
print(w_arr.shape)
plt.plot(w_arr[0,:])
for j in range(0,10):
plt.plot(w_arr[j,:100])
plt.plot(v_lst)
#Take PCA of the weights matrix
from sklearn.decomposition import PCA
pca=PCA(n_components=2)
X=pca.fit_transform(w_arr)
print(X.shape)
plt.scatter(X[:,0],X[:,1])
#Using Euler's method to calculate the weight increments
h=0.001
input_patterns=np.random.normal(1,1,(1000,500))
v_lst=[]
w_lst=[]
w=np.random.normal(loc=0,size=(1000,))
v_lst=[np.dot(w,input_patterns[:,1])]
for j in range(2,500):
v_lst.append(np.dot(w,input_patterns[:,j]))
w=w+h*v_lst[-1]*input_patterns[:,j]
w=np.clip(w,a_min=-100,a_max=100)
w_lst.append(w)
w_arr=np.array(w_lst).T
print(w_arr.shape)
plt.plot(w_arr[0,:])
for j in range(0,10):
plt.plot(w_arr[j,:100])
```
### **Initialization**
I use these three lines of code at the top of each notebook so that edited modules reload automatically while I rework the project, and the third line renders visualizations inline in the notebook.
```
# Initialization.
# I use these 3 lines of code on top of each Notebooks.
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
### **Downloading the Dependencies**
```
# Downloading all the necessary Libraries and Dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import re, math, graphviz, scipy
import seaborn as sns
# I will use XGboost in this Project because the Dataset has Timeseries Data.
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from xgboost import XGBRegressor
from xgboost import plot_importance
# I will also use the Fastai API in this Project for Data Preprocessing and Data Preparation
from pandas.api.types import is_string_dtype, is_numeric_dtype
from IPython.display import display
from sklearn.ensemble import forest
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, StandardScaler
from scipy.cluster import hierarchy as hc
from plotnine import *
from sklearn import metrics
from concurrent.futures import ProcessPoolExecutor
```
### **Getting the Data**
I have downloaded the data from the **Kaggle** competition dataset **Predict Future Sales**. I have used Google Colab, so the way the data is read may differ on other platforms.
```
# Loading the Data
# I am using Colab for this Project so accessing the Data might be different in different platforms.
path = "/content/drive/My Drive/Predict Future Sales"
# Creating the DataFrames using Pandas
transactions = pd.read_csv(os.path.join(path, "sales_train.csv.zip"))
items = pd.read_csv(os.path.join(path, "items.csv.zip"))
item_categories = pd.read_csv(os.path.join(path, "item_categories.csv"))
shops = pd.read_csv(os.path.join(path, "shops.csv"))
test = pd.read_csv(os.path.join(path, "test.csv.zip"))
```
### **Inspecting the Data**
Now I will take an overview of each DataFrame defined above and walk through each step so you can gain more insight from the data.
```
# Looking and Inspecting the Data
## Transactions DataFrame
display(transactions.head(3));
transactions.shape
```
The Transactions DataFrame is the training dataset. It contains a number of columns or features; the **item_cnt_day** column is our target feature, and we need to aggregate it per month to match the competition's target. Also, the **date** column is not in datetime format, so we should convert it into a datetime object since we are working with **Time Series** data.
```
## Items DataFrame
display(items.head(3));
items.shape
```
Similarly, the Items DataFrame contains the item names, item ids, and item category ids.
```
## Item Categories DataFrame
display(item_categories.head(3));
item_categories.shape
## Shops DataFrame
display(shops.head(3));
shops.shape
# Test DataFrame
display(test.head());
test.shape
```
### **Preparing the DataFrame**
First, we should create one common DataFrame for training the model. We can build it by merging all the DataFrames defined above except the Test DataFrame. While merging, I also go through several Feature Engineering and Preprocessing steps which will support the Exploratory Data Analysis (EDA) of the data.
```
# Merging the Transactions and Items DataFrame on "Item Id" column
train = pd.merge(transactions, items, on="item_id", how="left")
train.tail()
```
Although we could use the join method to combine two DataFrames, I prefer the merge method because Pandas' merge is more general and we don't have to apply suffixes to the resulting columns.
We can merge two DataFrames on their common columns; as you can see, I have merged Transactions and Items on the **item_id** column, and so on.
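As a toy illustration of that difference (two tiny hypothetical frames, not part of the sales data): `join` aligns on the index and needs suffixes when column names overlap, whereas `merge` lets us name the key column directly.
```
left = pd.DataFrame({"item_id": [1, 2], "price": [10, 20]})
right = pd.DataFrame({"item_id": [1, 2], "name": ["a", "b"]})
# merge: join on an explicit key column
merged = pd.merge(left, right, on="item_id", how="left")
# join: index-based; the overlapping "item_id" column forces lsuffix/rsuffix
joined = left.join(right, lsuffix="_l", rsuffix="_r")
```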
```
# Merging the Train, Item Categories and Shops DataFrame as well.
# Merging Train and Item Categories on "Item Category Id" column.
train_df = pd.merge(train, item_categories, on="item_category_id", how="left")
# Merging Train and Shops DataFrame on "Shop Id" column.
train_df = pd.merge(train_df, shops, on="shop_id", how="left")
train_df.head(10)
```
**Preprocessing and Feature Engineering**
Now I am converting the date column into a datetime object. I pass the format argument because the dates are stored as day.month.year; specifying the format avoids ambiguous parsing or format errors.
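A small illustration of why the explicit format matters (an example string in the same day.month.year layout): without it, Pandas may assume month-first ordering.
```
example = "02.01.2013"  # in this layout, 2 January 2013
print(pd.to_datetime(example))                      # parsed month-first: 2013-02-01
print(pd.to_datetime(example, format="%d.%m.%Y"))   # parsed as intended: 2013-01-02
```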
```
# Changing the Data column in Datetime Object
train_df["date"] = pd.to_datetime(train_df["date"], format="%d.%m.%Y")
train_df["date"].head()
# Working on Data Leakages
# Checking on Test DataFrame and Removing the Unnecessary Features
test_shops = test["shop_id"].unique()
test_items = test["item_id"].unique()
# Removing the Redundant Features
train_df = train_df[train_df["shop_id"].isin(test_shops)]
train_df = train_df[train_df["item_id"].isin(test_items)]
display(train_df.head()); train_df.shape
# Keeping only the Items whose price is greater than 0
train_df = train_df.query("item_price > 0")
# Creating the new feature which contains the items sold in a particular month
# item_cnt_day contains the number of items sold per day
train_df["item_cnt_day"] = train_df["item_cnt_day"].clip(0, 20)
train_df = train_df.groupby(["date", "item_category_id", "shop_id", "item_id", "date_block_num"])
train_df = train_df.agg({'item_cnt_day':"sum", 'item_price':"mean"}).reset_index()
train_df = train_df.rename(columns={"item_cnt_day":'item_cnt_month'})
# Using clip(0, 20) to meet the requirements of the Competition
train_df["item_cnt_month"] = train_df["item_cnt_month"].clip(0, 20)
train_df.head()
```
### **Working on DataFrame using Fastai API**
**Fastai Library or API**
- [Fast.ai](https://www.fast.ai/about/) is the first deep learning library to provide a single consistent interface to all the most commonly used deep learning applications for vision, text, tabular data, time series, and collaborative filtering.
- [Fast.ai](https://www.fast.ai/about/) is a deep learning library which provides practitioners with high-level components that can quickly and easily provide state-of-the-art results in standard deep learning domains, and provides researchers with low-level components that can be mixed and matched to build new approaches.
**Preparing the Model**
- I have used [Fastai](https://www.fast.ai/about/)-style helper functions to prepare the data for the model. The code can seem quite challenging to understand if you have never encountered the Fast.ai API before.
One important note for anyone who has never used the Fastai API is to go through the [Fastai Documentation](https://docs.fast.ai/). If you are using Fastai in a Jupyter Notebook, you can use doc(function_name) to get the documentation instantly.
**Writing and Downloading the Dependencies**
* These functions are already defined by Fastai; I have simply copied and pasted them here. Anybody familiar with their implementation can use them, since Fastai is open source.
```
def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None, prepoc_fn=None, max_n_cat=None,
subset=None, mapper=None):
if not ignore_flds: ignore_flds=[]
if not skip_flds: skip_flds=[]
if subset:
df = get_sample(df, subset)
else:
df = df.copy()
ignored_flds = df.loc[:, ignore_flds]
df.drop(ignore_flds, axis=1, inplace=True)
if prepoc_fn: prepoc_fn(df)
if y_fld is None: y=None
else:
if not is_numeric_dtype(df[y_fld]): df[y_fld] = pd.Categorical(df[y_fld]).codes
y = df[y_fld].values
skip_flds += [y_fld]
df.drop(skip_flds, axis=1, inplace=True)
if na_dict is None: na_dict = {}
else: na_dict = na_dict.copy()
na_dict_initial = na_dict.copy()
for n, c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
if len(na_dict_initial.keys()) > 0:
df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
if do_scale: mapper = scale_vars(df, mapper)
for n, c in df.items(): numericalize(df, c, n, max_n_cat)
df = pd.get_dummies(df, dummy_na=True)
df = pd.concat([ignored_flds, df], axis=1)
res = [df, y, na_dict]
if do_scale: res = res + [mapper]
return res
def fix_missing(df, col, name, na_dict):
if is_numeric_dtype(col):
if pd.isnull(col).sum() or (name in na_dict):
df[name + '_na'] = pd.isnull(col)
filler = na_dict[name] if name in na_dict else col.median()
df[name] = col.fillna(filler)
na_dict[name] = filler
return na_dict
def numericalize(df, col, name, max_n_cat):
if not is_numeric_dtype(col) and (max_n_cat is None or col.nunique()>max_n_cat):
df[name] = col.cat.codes+1
def get_sample(df, n):
idxs = sorted(np.random.permutation(len(df))[:n])
return df.iloc[idxs].copy()
def set_rf_samples(n):
    # Monkey-patch sklearn's forest module so each tree draws n bootstrap samples.
    forest._generate_sample_indices = (lambda rs, n_samples:
        forest.check_random_state(rs).randint(0, n_samples, n))
def reset_rf_samples():
    # Restore the default bootstrap behaviour (n_samples draws per tree).
    forest._generate_sample_indices = (lambda rs, n_samples:
        forest.check_random_state(rs).randint(0, n_samples, n_samples))
def split_vals(a, n):
return a[:n].copy(), a[n:].copy()
def train_cats(df):
for n,c in df.items():
if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()
def apply_cats(df, trn):
for n, c in df.items():
if trn[n].dtype.name == "category":
df[n] = pd.Categorical(c, categories = trn[n].cat.categories, ordered = True)
def add_datepart(df, fldnames, drop=True, time=False, errors="raise"):
if isinstance(fldnames, str):
fldnames = [fldnames]
for fldname in fldnames:
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
targ_pre = re.sub("[Dd]ate$", '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
        df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10**9
if drop: df.drop(fldname, axis=1, inplace=True)
def scale_vars(df, mapper):
    import warnings, sklearn.exceptions  # needed when proc_df is called with do_scale=True
    warnings.filterwarnings("ignore", category=sklearn.exceptions.DataConversionWarning)
if mapper is None:
map_f = [([n], StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
mapper = DataFrameMapper(map_f).fit(df)
df[mapper.transformed_names_] = mapper.transform(df)
return mapper
def rmse(x, y):
return math.sqrt(((x-y)**2).mean())
def print_score(m):
res = [rmse(m.predict(X_train), y_train),
rmse(m.predict(X_valid), y_valid),
m.score(X_train, y_train),
m.score(X_valid, y_valid)]
if hasattr(m, 'oob_score_'):
res.append(m.oob_score_)
print(res)
# Using add_datepart function
# This function is very useful while working on Time-Series Data
add_datepart(train_df, "date")
train_df.columns
# Observing the DataFrame again after applying API
train_df.head()
# Dealing with Categorical Features
train_cats(train_df)
# Checking for Null Values in DataFrame
train_df.isnull().sum().sort_index() / len(train_df)
os.makedirs("tmp", exist_ok=True)
train_df.to_feather("tmp/new")
```
### **Preparing the Model: XGBoost**
**Processing**
```
# Loading the Data and Going through simple Exploratory Data Analysis
data = pd.read_feather("tmp/new")
display(data.head(3));
data.shape
data.describe()
new_df, y, nas = proc_df(data, "item_cnt_month")
# Preparing the Validation Data
n_valid = 200000
n_trn = len(data) - n_valid
raw_train, raw_valid = split_vals(data, n_trn)
X_train, X_valid = split_vals(new_df, n_trn)
y_train, y_valid = split_vals(y, n_trn)
# Checking the Shape of Training and Validation Data
X_train.shape, X_valid.shape, y_train.shape, y_valid.shape
# Creating the Regressor Model
model = XGBRegressor(
max_depth=8,
n_estimators=1000,
min_child_weight=300,
colsample_bytree=0.8,
subsample=0.8,
eta=0.3,
seed=42
)
# Fitting the Model
model.fit(
X_train,
y_train,
eval_metric="rmse",
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=True,
early_stopping_rounds=10
)
```
**Preparing the Submission**
```
X_test = data[data["date_block_num"] == 33].drop(["item_cnt_month"], axis=1)
Y_test = model.predict(X_test)
submission = pd.DataFrame({
"ID": test["ID"].iloc[:49531],
"item_cnt_month": Y_test.clip(0, 20)
})
submission.to_csv('xgb_submission.csv', index=False)
```
```
# ms-python.python added
import os
try:
os.chdir(os.path.join(os.getcwd(), 'hw1'))
print(os.getcwd())
except:
pass
```
# Homework 1 - Renjie Zhu - A53266114
Data parsing using ```csv.DictReader```.
```
import csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
import scipy as sp
import random
data_filename = "amazon_reviews_us_Gift_Card_v1_00.tsv"
data = []
with open(data_filename, newline="") as data_file:
reader = csv.DictReader(data_file, delimiter="\t")
for row in reader:
data.append(row)
rating = {}
for ele in data:
if ele["star_rating"] not in rating.keys():
rating[ele["star_rating"]] = 0
else:
rating[ele["star_rating"]] += 1
```
1. As shown in the above cell:
   - 5 stars: 129028
   - 4 stars: 9807
   - 3 stars: 3146
   - 2 stars: 1559
   - 1 star: 4765
```
rating_list = [(k,v) for k,v in rating.items()]
rating_list.sort()
rts, nums = zip(*rating_list)
plt.bar(rts,nums)
```
3.
To train a predictor defined as
$ R = \theta_0 + \theta_1 v + \theta_2 l $,
we have
$ R = \Theta^\top \vec{x} = \begin{pmatrix} \theta_0 & \theta_1 & \theta_2 \end{pmatrix}
\begin{pmatrix} 1 \\ v \\ l \end{pmatrix}
$
where $R$ is rating, $v$ is $1$ if verified and $0$ otherwise, and $l$ is length of
the review.
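The coefficients are then the ordinary least-squares solution, which is what `scipy.linalg.lstsq` computes below; writing $X$ for the design matrix with rows $(1, v_i, l_i)$ and $R$ for the vector of ratings (and assuming $X^{\top}X$ is invertible),

$$\hat{\Theta} = \arg\min_{\Theta}\ \lVert X\Theta - R\rVert_2^2 = (X^{\top}X)^{-1}X^{\top}R.$$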
```
def parse_XR(data):
X = []
R = []
for ele in data:
x = np.ones(3)
if ele["verified_purchase"].upper() != "Y":
x[1] = 0
x[2] = len(ele["review_body"])
X.append(x)
R.append(int(ele["star_rating"]))
X = np.array(X)
R = np.array(R)
return X, R
X, R = parse_XR(data)
t_3 = sp.linalg.lstsq(X, R)
print(f"We have theta_0 = {t_3[0][0]}, theta_1 = {t_3[0][1]}, theta_2 = {t_3[0][2]}.")
```
4.
$\theta_0$ is a value very close to 5. This is expected from the distribution above,
where most reviews are given five stars. $\theta_1$ is a small positive number, and
since the verified flag can only be 0 or 1, it doesn't tell us much in this situation.
$\theta_2$ is an even smaller number, but since the review length is usually much larger
than 5, this is expected. $\theta_2$ is also negative, which means that the longer the
review, the lower the rating. One interpretation is that people tend to write longer,
more critical reviews after a bad purchase experience.
The predictor now only considers if the review is verified, so the problem becomes
$R = \Theta^\top \vec{x} = \begin{pmatrix} \theta_0 & \theta_1 \end{pmatrix}
\begin{pmatrix} 1 \\ v \end{pmatrix}$
```
t_4 = sp.linalg.lstsq(X[:,:2],R)
print(f"We have theta_0 = {t_4[0][0]}, theta_1 = {t_4[0][1]}.")
```
If we do not consider the length of the review and focus only on whether the
purchase is verified, the predicted rating is now more affected by the verified
flag than in the previous problem: the coefficient rises from 0.050 to 0.168.
This tells us that a verified buyer is more likely to give a higher rating,
though the difference is small. It may indicate that non-verified buyers
occasionally give very low ratings to sabotage the product rating.
```
def split_train_test(data, per):
split = int(np.ceil(per * len(data)))
train_set = data[:split]
test_set = data[split:]
return train_set, test_set
```
5.
Splitting the data into two portions
```
train_set, test_set = split_train_test(data, 0.9)
X_t, R_t = parse_XR(train_set)
t_train = sp.linalg.lstsq(X_t[:,:2],R_t)
print(f"For the 90% training set, we have theta_0 = {t_train[0][0]}, theta_1 = {t_train[0][1]}.")
t_train = np.array(t_train[0])
pred_t = X_t[:,:2] @ t_train.reshape((2,1))
mse_t = np.mean((R_t.reshape((-1,1)) - pred_t) ** 2)
X_te, R_te = parse_XR(test_set)
pred_te = X_te[:,:2] @ t_train.reshape((2,1))
mse_test = np.mean((R_te.reshape((-1,1)) - pred_te) ** 2)
def train_predict(data, per):
train_set, test_set = split_train_test(data, per)
X_t, R_t = parse_XR(train_set)
X_te, R_te = parse_XR(test_set)
t_train = sp.linalg.lstsq(X_t[:,:2],R_t)
t_train = np.array(t_train[0])
pred_t = X_t[:,:2] @ t_train.reshape((2,1))
mse_t = np.mean((R_t.reshape((-1,1)) - pred_t) ** 2)
pred_te = X_te[:,:2] @ t_train.reshape((2,1))
mse_te = np.mean((R_te.reshape((-1,1)) - pred_te) ** 2)
return mse_t, mse_te
```
7.
We use different proportions for the train data.
```
train_percentage = np.linspace(0.05, 0.95, 57)
mse_train = []
mse_test = []
for per in list(train_percentage):
mse_t, mse_te = train_predict(data, per)
mse_train.append(mse_t)
mse_test.append(mse_te)
plt.plot(train_percentage, mse_train, label="train")
plt.plot(train_percentage, mse_test, label="test")
plt.legend()
plt.show()
```
The size of the training set definitely has a great effect on the testing
performance. As shown in the graph above, for a larger testing set the testing
performance goes down (seemingly exponentially) while the training error stays
low. When the training set is too large, the model can overfit the training set.
The data is also not shuffled and may have trends hidden in its ordering, which
can contribute to the poor testing performance for a larger training set.
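One quick way to probe that ordering effect (a sketch using the helpers defined above, not part of the original write-up) is to shuffle a copy of the records before splitting and recompute the errors:
```
shuffled = data[:]            # shallow copy so the original order is preserved
random.shuffle(shuffled)
mse_t_shuf, mse_te_shuf = train_predict(shuffled, 0.9)
print(mse_t_shuf, mse_te_shuf)
```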
```
def parse_VR(data):
X = []
R = []
for ele in data:
x = np.ones(3)
x[1] = int(ele["star_rating"])
x[2] = len(ele["review_body"])
X.append(x)
if ele["verified_purchase"].upper() != "Y":
R.append(0)
else:
R.append(1)
X = np.array(X)
R = np.array(R)
return X, R
X_t, R_t = parse_VR(train_set)
X_te, R_te = parse_VR(test_set)
model = linear_model.LogisticRegression()
model.fit(X_t, R_t)
pred_LR = model.predict(X_te)
correct_pred = (pred_LR == R_te)
accuracy = sum(correct_pred) / len(correct_pred)
print(f"The prediction accuracy is {accuracy*100}.")
label_p = sum(R_te) / len(R_te)
pred_p = sum(pred_LR) / len(R_te)
print(f"label positive: {label_p*100}, prediction positive: {pred_p*100}.")
```
8. The accuracy is pretty poor. It is essentially just the fraction of positive
labels, because the predictor ends up predicting almost everything as positive.
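A confusion matrix makes this concrete (a sketch; assumes scikit-learn's `confusion_matrix` and reuses the predictions computed above):
```
from sklearn.metrics import confusion_matrix
# rows are the true class (0 = not verified, 1 = verified), columns the predicted class
print(confusion_matrix(R_te, pred_LR))
```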
```
def parse_own(data):
X = []
R = []
for ele in data:
x = np.ones(5)
x[1] = int(ele["star_rating"])
x[2] = len(ele["review_body"])
if int(ele["total_votes"]) > 0:
x[3] = int(ele["helpful_votes"]) / int(ele["total_votes"])
else:
x[3] = 0
x[4] = ele["review_body"].count("!")
X.append(x)
if ele["verified_purchase"].upper() != "Y":
R.append(0)
else:
R.append(1)
X = np.array(X)
R = np.array(R)
return X, R
train_set, test_set = split_train_test(data, 0.9)
X_t, R_t = parse_own(train_set)
X_te, R_te = parse_own(test_set)
model = linear_model.LogisticRegression()
model.fit(X_t, R_t)
pred_LR = model.predict(X_te)
correct_pred = (pred_LR == R_te)
accuracy = sum(correct_pred) / len(correct_pred)
print(f"The prediction accuracy is {accuracy*100}.")
label_p = sum(R_te) / len(R_te)
pred_p = sum(pred_LR) / len(R_te)
print(f"label positive: {label_p*100}, prediction positive: {pred_p*100}.")
```
The new model is
$$
p(\text{review is verified})\approx \sigma (
\theta_0 + \theta_1 \times [\text{star rating}] + \theta_2 \times [\text{review length}]
+ \theta_3 \times \frac{[\text{helpful votes}]}{[\text{total votes}]}
+ \theta_4 \times [\text{vine}] + \theta_5 \times [\text{\# of !'s in review body}]
)
$$
After closer inspection of the data, it seems that there are no Vine users in the dataset
despite it being one of the fields. So, even though whether a user is a Vine user could be
a good indicator of the validity of a review, this feature is dropped from the fitted model.
Also, the number of "!"'s in the review body probably does not contribute much, as most
reviews don't contain one.
The result is basically the same, which is expected: without understanding more of the review
body through some NLP techniques, the other features add little, and the training data remains
heavily biased.
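As a rough illustration of that direction (a hypothetical sketch, not part of the original homework), one could fold a simple bag-of-words representation of the review text into the classifier, assuming scikit-learn's `CountVectorizer`; variable names here are illustrative.
```
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(max_features=1000)
text_t = vec.fit_transform([ele["review_body"] for ele in train_set])
text_te = vec.transform([ele["review_body"] for ele in test_set])

# stack the hand-crafted features with the word counts
X_t_text = hstack([csr_matrix(X_t), text_t])
X_te_text = hstack([csr_matrix(X_te), text_te])

model_text = linear_model.LogisticRegression()
model_text.fit(X_t_text, R_t)
print("accuracy with text features:", model_text.score(X_te_text, R_te))
```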
```
random.shuffle(data)
train_set, test_set = split_train_test(data, 0.9)
X_t, R_t = parse_own(train_set)
X_te, R_te = parse_own(test_set)
model = linear_model.LogisticRegression()
model.fit(X_t, R_t)
pred_LR = model.predict(X_te)
correct_pred = (pred_LR == R_te)
accuracy = sum(correct_pred) / len(correct_pred)
print(f"The prediction accuracy is {accuracy*100}.")
label_p = sum(R_te) / len(R_te)
pred_p = sum(pred_LR) / len(R_te)
print(f"label positive: {label_p*100}, prediction positive: {pred_p*100}.")
```
As described above, the training data is heavily biased, which motivated a trial with
shuffled data. The results give a better accuracy (and a more consistent one, because of
the shuffling). The predictor, however, still predicts positive for almost all reviews.
```
%matplotlib inline
from __future__ import print_function, division
import sys, os
import json
import math
import itertools, collections
import datetime as dt
import geopandas as gp
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_context("poster")
#sns.set_context("talk", font_scale=1.4)
current_palette = sns.color_palette(sns.hls_palette(6, l=.4, s=.9))
sns.set_palette(current_palette)
sns.palplot(current_palette)
palette = itertools.cycle(sns.color_palette())
```
We use Alaska Albers shapefiles throughout so that distances can be measured in metres. The WGS84 shapefiles from AICC and the Forest Service Active Fire Maps web portal were converted using GDAL:
`ogr2ogr -f "ESRI Shapefile" outfile_AKAlbers.shp infile.shp -s_srs EPSG:4326 -t_srs EPSG:3338`
### Data loading - fire events as per management agencies & remotely sensed hotspots
Fire GIS data from AICC. (*NOTE:* The AK Albers shapefile for fire locations was not generated directly from the WGS84 AICC shapefile; the latter was found to be somewhat corrupted, with fields out of date. I first transformed it to WGS84 JSON and then generated the AK Albers shapefile from the JSON. The data thus generated looks consistent.) These have been pre-processed and joined against fire perimeters.
```
datadir = "/Volumes/SCIENCE_mobile_Mac/Fire/DATA_BY_PROJECT/2015VIIRSMODIS/10_intermediate_products/"
fireperimshp = "2016cleanedFirePerimsWithFirepoints20170417.shp"
```
Active fire hotspots from FIRMS, also pre-processed:
```
mod14shp = "2016MOD14_Perims_AKAlbers_5km.shp"
viirsIshp = "2016VIIRSI_Perims_AKAlbers_5km.shp"
fireperimDF = gp.GeoDataFrame.from_file(os.path.join(datadir, fireperimshp))
mod14DF = gp.GeoDataFrame.from_file(os.path.join(datadir, mod14shp))
viirsIDF = gp.GeoDataFrame.from_file(os.path.join(datadir, viirsIshp))
```
We keep a list of columns for printing out data.
```
maincols = [u'FireName', u'CalcAcres', u'PrimFuel', u'MgmOption', u'InitBehave',
u'PerimDate', u'DiscDate', u'CntrlDate', u'OutDate']
numperims = len(fireperimDF)
print("Total number of fire perimeters: {}".format(numperims))
print("Number of fire perimeters with AFS ID: {}".format(numperims - sum(fireperimDF['AFSFire#'].isnull())))
print("Number of fire perimeters with DOF ID: {}".format(numperims - sum(fireperimDF['DOFFire#'].isnull())))
print("Number of fire perimeters with USFS ID: {}".format(numperims - sum(fireperimDF['USFSFire#'].isnull())))
cleanedfireperimDF = fireperimDF
cleanedfireperimDF.columns
```
### Inspecting the cleaned perimeter dataset, and last adjustments
Here is the reason I kept these columns. I'll change the case of the management options...
```
cleanedfireperimDF['PrimFuel'].value_counts()
cleanedfireperimDF['InitBehave'].value_counts()
```
We also need to ensure spelling of the properties is normalised:
```
cleanedfireperimDF['MgmOption'].value_counts()
cleanedfireperimDF['GenCause'].value_counts()
cleanedfireperimDF.sort_values(by="CalcAcres", ascending=False).head(n=10)
cleanedfireperimDF['OutDate'].max()
cleanedfireperimDF['DiscDate'].min()
cleanedfireperimDF['DiscDate'].max()
viirsIDF.shape
viirsIDF['ACQ_DATE'].min()
viirsIDF['ACQ_DATE'].max()
viirsIDF['LONGITUDE'].max()
cleanedfireperimDF['Comments']
cleanedfireperimDF['Comments'].str.contains('Landsat', case=False).sum()
cleanedfireperimDF['Comments'].str.contains('DOF', case=False).sum()
(cleanedfireperimDF['Comments'].str.contains('aerial', case=False) |
cleanedfireperimDF['Comments'].str.contains('helicopter', case=False)).sum()
cleanedfireperimDF['Comments'].isnull().sum()
mod14DF.shape
mod14DF['ACQ_DATE'].min()
mod14DF['ACQ_DATE'].max()
viirsIDF.columns
lateviirs = viirsIDF[viirsIDF['ACQ_DATE'] > '2016-09-31']
latemodis = mod14DF[mod14DF['ACQ_DATE'] > '2016-09-30']
earlyviirs = viirsIDF[viirsIDF['ACQ_DATE'] < '2016-04-01']
earlymodis = mod14DF[mod14DF['ACQ_DATE'] < '2016-04-01']
offviirs = viirsIDF[viirsIDF['infireperi'] == 0]
offmodis = mod14DF[mod14DF['infireperi'] == 0]
viirs_5km = viirsIDF[viirsIDF['5kmfireper'] == 1]
modis_5km = mod14DF[mod14DF['5kmfireper'] == 1]
viirsaleut = viirsIDF[(viirsIDF['LATITUDE'] < 58.0)
& (viirsIDF['LONGITUDE'] < -158.0)]
modaleut = mod14DF[(mod14DF['LATITUDE'] < 58.0)
& (mod14DF['LONGITUDE'] < -158.0)]
viirsprudhoe = viirsIDF[(viirsIDF['LATITUDE'] > 70.0)
& (viirsIDF['LONGITUDE'] < -148.0)
& (viirsIDF['LONGITUDE'] > -150.0)]
modisprudhoe = mod14DF[(mod14DF['LATITUDE'] > 70.0)
& (mod14DF['LONGITUDE'] < -148.0)
& (mod14DF['LONGITUDE'] > -150.0)]
viirsnotclose = viirsIDF[
(viirsIDF['infireperi'] == 0) &
(viirsIDF['5kmfireper'] == 0)]
modnotclose = mod14DF[
(mod14DF['infireperi'] == 0) &
(mod14DF['5kmfireper'] == 0)]
len(modnotclose)
earlyviirs.shape
earlymodis.shape
viirsaleut.shape
modaleut.shape
viirsprudhoe.shape
modisprudhoe.shape
offmodis.shape
offviirs.shape
viirs_5km.shape
modis_5km.shape
viirsIDF.shape
mod14DF.shape
len(viirsIDF[
(viirsIDF['infireperi'] == 0) &
(viirsIDF['5kmfireper'] == 1)])
len(viirsIDF[
(viirsIDF['infireperi'] == 0) &
(viirsIDF['5kmfireper'] == 1)])
mod14DF.groupby(['infireperi', '5kmfireper']).size()
viirsIDF.groupby(['infireperi', '5kmfireper']).size()
```
###### How many fires were not detected? Further data inspection
```
print("Number of undetected fires by MODIS: {}".format(sum(cleanedfireperimDF.MOD14count == 0)))
print(("Percentage of undetected fires: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14count == 0)/len(cleanedfireperimDF))))
print(("Acreage of largest undetected fire: {}".format(max(cleanedfireperimDF.CalcAcres[cleanedfireperimDF.MOD14count == 0]))))
print("Number of fires not detected by MODIS Terra: {}".format(sum(cleanedfireperimDF.MOD14Tcoun == 0)))
print(("Percentage of fires not detected by MODIS Terra: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14Tcoun == 0)/len(cleanedfireperimDF))))
print("Number of fires not detected by MODIS Aqua: {}".format(sum(cleanedfireperimDF.MOD14Acoun == 0)))
print(("Percentage of fires not detected by MODIS Aqua: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14Acoun == 0)/len(cleanedfireperimDF))))
max(cleanedfireperimDF.MOD14count)
print("Number of undetected fires: {}".format(sum(cleanedfireperimDF.VIIRSIcoun == 0)))
print(("Percentage of undetected fires: {:.2f}".format(100*sum(cleanedfireperimDF.VIIRSIcoun == 0)/len(cleanedfireperimDF))))
print(("Acreage of largest undetected fire: {}".format(max(cleanedfireperimDF.CalcAcres[cleanedfireperimDF.VIIRSIcoun == 0]))))
max(cleanedfireperimDF.VIIRSIcoun)
mod14DF.groupby('infireperi').size()
mod14DF['infireperi'].value_counts()
viirsIDF['infireperi'].value_counts()
cleanedfireperimDF[cleanedfireperimDF.VIIRSIcoun == 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.VIIRSIcoun > 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14Acoun == 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14Acoun > 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14count == 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14count > 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14count==max(cleanedfireperimDF.MOD14count)][maincols]
keepperimcols = list(cleanedfireperimDF.columns)
keepperimcols.remove('geometry')
cleanedfireperimDF.columns
cleanedfireperimDF_forstats = cleanedfireperimDF[keepperimcols].copy()
cleanedfireperimDF['fireduration'] = cleanedfireperimDF.apply(
lambda row: (dt.datetime.strptime(row.OutDate, "%Y-%m-%d") - dt.datetime.strptime(row.DiscDate, "%Y-%m-%d")).days,
axis=1)
cleanedfireperimDF[cleanedfireperimDF['MOD14count'] > 0]['fireduration'].mean()
cleanedfireperimDF.groupby(["MgmOption"])["MOD14count"].mean()
cleanedfireperimDF.groupby(["MgmOption"])["CalcAcres"].mean()
```
## Preparing for further data analysis
Let's make a column that indicates black spruce vs. non black spruce fueled fires.
```
cleanedfireperimDF['blacksprucefire'] = cleanedfireperimDF['PrimFuel'] == "Black Spruce"
```
## Data inspection for badly detected fires etc.
```
displayperimcols = maincols + [u'VIIRSIcoun', u'MOD14count']
cleanedfireperimDF.columns
cleanedfireperimDF['Hectares'] = cleanedfireperimDF['CalcAcres'] / 2.47105
cleanedfireperimDF['sqkm'] = cleanedfireperimDF['Hectares'] / 100
```
### Statistics!
```
import numpy as np
import statsmodels.formula.api as smf
from pandas.tools.plotting import plot_frame
```
Somehow Pandas-style plotting doesn't work on GeoPandas GeoDataFrames. Until I find a better solution, I'm making a copy that's just a regular DataFrame:
```
keepperimcols = list(cleanedfireperimDF.columns)
keepperimcols.remove('geometry')
cleanedfireperimDF_forstats = cleanedfireperimDF[keepperimcols].copy()
type(cleanedfireperimDF_forstats)
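# A possibly simpler alternative (a sketch, not verified against this GeoPandas
# version; the variable name below is illustrative): build the plain DataFrame
# directly after dropping the geometry column.
cleanedfireperimDF_alt = pd.DataFrame(cleanedfireperimDF.drop('geometry', axis=1))
type(cleanedfireperimDF_alt)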
cleanedfireperimDF['Hectares'].describe()
results = smf.ols('VIIRSIcoun ~ MOD14count', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('MOD14count ~ CalcAcres', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('MOD14count ~ Hectares', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('MOD14count ~ sqkm', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('VIIRSIcoun ~ CalcAcres', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('VIIRSIcoun ~ Hectares', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('VIIRSIcoun ~ sqkm', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results.params
results = smf.ols('VIIRSIcoun ~ MOD14count', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
sns.set_context("paper", font_scale=2)
ax1 = sns.lmplot(x="Hectares", y="VIIRSIcoun",
size=5,
aspect=1.5,
data=cleanedfireperimDF);
sns.lmplot(x="Hectares", y="MOD14count",
data=cleanedfireperimDF,
size=5,
aspect=1.5,
);
ax1 = sns.regplot(x="Hectares", y="MOD14count",
data=cleanedfireperimDF,
line_kws={'zorder': 1}
);sns.regplot(x="Hectares", y="VIIRSIcoun",
data=cleanedfireperimDF,
line_kws={'zorder': 1},
ax=ax1
);
sns.set_context("poster", font_scale=1.8)
palette = itertools.cycle(sns.color_palette())
xlim = (-22, 500)
ylim = (-22, 1500)
ax1 = plot_frame(cleanedfireperimDF, kind='scatter',
x="MOD14count",
y="VIIRSIcoun",
xlim=xlim,
ylim=ylim,
s=60,
marker='o',
c='Hectares',
cmap="gist_stern_r",
vmin=0,
vmax=23000,
colorbar=True,
legend=True,
figsize=(15, 12),
sharex=False,
zorder=2
)
sns.regplot(
x="MOD14count",
y="VIIRSIcoun",
data=cleanedfireperimDF,
ax=ax1,
color="grey",
scatter=False,
line_kws={'zorder': 1}
)
ax1.set_xlabel('MODIS detections')
ax1.set_ylabel('VIIRS I-band detections')
f = plt.gcf()
gisoutdir = '/Volumes/SCIENCE_mobile_Mac/Fire/DATA_BY_PROJECT/2015VIIRSMODIS/GISout/'
outfn = 'fig05.png'
f.savefig(os.path.join(gisoutdir, outfn), dpi=300, bbox_inches='tight')
outfn = 'fig05_LR.png'
f.savefig(os.path.join(gisoutdir, outfn), dpi=150, bbox_inches='tight')
sns.set_context("poster", font_scale=1.6)
plotpalette = sns.xkcd_palette(['denim blue', 'apple green', ])
palette1 = itertools.cycle(plotpalette)
xlim = (-100, 23000)
ylim = (-100, 1500)
palette0 = itertools.cycle(sns.color_palette())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(20, 8))
plot_frame(cleanedfireperimDF,
kind='scatter', x='Hectares', y="VIIRSIcoun", s=80, edgecolor='k', marker='D',
zorder=5, ax=ax0, label="VIIRS I-band", color=next(palette1))
plot_frame(cleanedfireperimDF,
kind='scatter', x='Hectares', y="MOD14count", s=80, edgecolor='k', marker='s',
zorder=5, ax=ax0, label="MODIS", color=next(palette1))
sns.regplot(x="Hectares", y="VIIRSIcoun",
data=cleanedfireperimDF,
line_kws={'zorder': 1},
color=next(palette1),
scatter=False,
ax=ax0
);
sns.regplot(x="Hectares", y="MOD14count",
data=cleanedfireperimDF,
line_kws={'zorder': 1},
color=next(palette1),
scatter=False,
ax=ax0
);
leg = ax0.legend(frameon=True)
leg.get_frame().set_facecolor('white')
ax0.set_xlabel('Hectares')
ax0.set_ylabel('Fire detections')
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax0.text(2000, 1300, 'a)')
#ax0.set_xticks([0, 25000, 50000, 75000, 100000, 125000])
# right
xlim = (-22, 500)
ylim = (-22, 1500)
plot_frame(cleanedfireperimDF, kind='scatter',
x="MOD14count",
y="VIIRSIcoun",
xlim=xlim,
ylim=ylim,
s=80,
edgecolor='k',
c='Hectares',
cmap="viridis_r",
vmin=0,
vmax=23000,
colorbar=True,
legend=True,
sharex=False,
zorder=2,
ax=ax1
)
sns.regplot(
x="MOD14count",
y="VIIRSIcoun",
data=cleanedfireperimDF,
ax=ax1,
color="grey",
scatter=False,
line_kws={'zorder': 1}
)
ax1.set_xlabel('MODIS detections')
ax1.set_ylabel('VIIRS I-band detections')
ax1.text(50, 1300, 'b)')
plt.tight_layout()
outfn = 'Fig05'
fig.savefig(os.path.join(gisoutdir, outfn + '_LR.png'), dpi=150, bbox_inches='tight')
from PIL import Image
from io import BytesIO
fig.savefig(os.path.join(gisoutdir, outfn + '.png'), dpi=300, bbox_inches='tight')
png = Image.open(os.path.join(gisoutdir, outfn + '.png'))
png.save(os.path.join(gisoutdir, outfn + '.tif'))
png.close()
```
|
github_jupyter
|
%matplotlib inline
from __future__ import print_function, division
import sys, os
import json
import math
import itertools, collections
import datetime as dt
import geopandas as gp
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_context("poster")
#sns.set_context("talk", font_scale=1.4)
current_palette = sns.color_palette(sns.hls_palette(6, l=.4, s=.9))
sns.set_palette(current_palette)
sns.palplot(current_palette)
palette = itertools.cycle(sns.color_palette())
datadir = "/Volumes/SCIENCE_mobile_Mac/Fire/DATA_BY_PROJECT/2015VIIRSMODIS/10_intermediate_products/"
fireperimshp = "2016cleanedFirePerimsWithFirepoints20170417.shp"
mod14shp = "2016MOD14_Perims_AKAlbers_5km.shp"
viirsIshp = "2016VIIRSI_Perims_AKAlbers_5km.shp"
fireperimDF = gp.GeoDataFrame.from_file(os.path.join(datadir, fireperimshp))
mod14DF = gp.GeoDataFrame.from_file(os.path.join(datadir, mod14shp))
viirsIDF = gp.GeoDataFrame.from_file(os.path.join(datadir, viirsIshp))
maincols = [u'FireName', u'CalcAcres', u'PrimFuel', u'MgmOption', u'InitBehave',
u'PerimDate', u'DiscDate', u'CntrlDate', u'OutDate']
numperims = len(fireperimDF)
print("Total number of fire perimeters: {}".format(numperims))
print("Number of fire perimeters with AFS ID: {}".format(numperims - sum(fireperimDF['AFSFire#'].isnull())))
print("Number of fire perimeters with DOF ID: {}".format(numperims - sum(fireperimDF['DOFFire#'].isnull())))
print("Number of fire perimeters with USFS ID: {}".format(numperims - sum(fireperimDF['USFSFire#'].isnull())))
cleanedfireperimDF = fireperimDF
cleanedfireperimDF.columns
cleanedfireperimDF['PrimFuel'].value_counts()
cleanedfireperimDF['InitBehave'].value_counts()
cleanedfireperimDF['MgmOption'].value_counts()
cleanedfireperimDF['GenCause'].value_counts()
cleanedfireperimDF.sort_values(by="CalcAcres", ascending=False).head(n=10)
cleanedfireperimDF['OutDate'].max()
cleanedfireperimDF['DiscDate'].min()
cleanedfireperimDF['DiscDate'].max()
viirsIDF.shape
viirsIDF['ACQ_DATE'].min()
viirsIDF['ACQ_DATE'].max()
viirsIDF['LONGITUDE'].max()
cleanedfireperimDF['Comments']
cleanedfireperimDF['Comments'].str.contains('Landsat', case=False).sum()
cleanedfireperimDF['Comments'].str.contains('DOF', case=False).sum()
(cleanedfireperimDF['Comments'].str.contains('aerial', case=False) |
cleanedfireperimDF['Comments'].str.contains('helicopter', case=False)).sum()
cleanedfireperimDF['Comments'].isnull().sum()
mod14DF.shape
mod14DF['ACQ_DATE'].min()
mod14DF['ACQ_DATE'].max()
viirsIDF.columns
lateviirs = viirsIDF[viirsIDF['ACQ_DATE'] > '2016-09-31']
latemodis = mod14DF[mod14DF['ACQ_DATE'] > '2016-09-30']
earlyviirs = viirsIDF[viirsIDF['ACQ_DATE'] < '2016-04-01']
earlymodis = mod14DF[mod14DF['ACQ_DATE'] < '2016-04-01']
offviirs = viirsIDF[viirsIDF['infireperi'] == 0]
offmodis = mod14DF[mod14DF['infireperi'] == 0]
viirs_5km = viirsIDF[viirsIDF['5kmfireper'] == 1]
modis_5km = mod14DF[mod14DF['5kmfireper'] == 1]
viirsaleut = viirsIDF[(viirsIDF['LATITUDE'] < 58.0)
& (viirsIDF['LONGITUDE'] < -158.0)]
modaleut = mod14DF[(mod14DF['LATITUDE'] < 58.0)
& (mod14DF['LONGITUDE'] < -158.0)]
viirsprudhoe = viirsIDF[(viirsIDF['LATITUDE'] > 70.0)
& (viirsIDF['LONGITUDE'] < -148.0)
& (viirsIDF['LONGITUDE'] > -150.0)]
modisprudhoe = mod14DF[(mod14DF['LATITUDE'] > 70.0)
& (mod14DF['LONGITUDE'] < -148.0)
& (mod14DF['LONGITUDE'] > -150.0)]
viirsnotclose = viirsIDF[
(viirsIDF['infireperi'] == 0) &
(viirsIDF['5kmfireper'] == 0)]
modnotclose = mod14DF[
(mod14DF['infireperi'] == 0) &
(mod14DF['5kmfireper'] == 0)]
len(modnotclose)
earlyviirs.shape
earlymodis.shape
viirsaleut.shape
modaleut.shape
viirsprudhoe.shape
modisprudhoe.shape
offmodis.shape
offviirs.shape
viirs_5km.shape
modis_5km.shape
viirsIDF.shape
mod14DF.shape
len(viirsIDF[
(viirsIDF['infireperi'] == 0) &
(viirsIDF['5kmfireper'] == 1)])
len(viirsIDF[
(viirsIDF['infireperi'] == 0) &
(viirsIDF['5kmfireper'] == 1)])
mod14DF.groupby(['infireperi', '5kmfireper']).size()
viirsIDF.groupby(['infireperi', '5kmfireper']).size()
print("Number of undetected fires by MODIS: {}".format(sum(cleanedfireperimDF.MOD14count == 0)))
print(("Percentage of undetected fires: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14count == 0)/len(cleanedfireperimDF))))
print(("Acreage of largest undetected fire: {}".format(max(cleanedfireperimDF.CalcAcres[cleanedfireperimDF.MOD14count == 0]))))
print("Number of fires not detected by MODIS Terra: {}".format(sum(cleanedfireperimDF.MOD14Tcoun == 0)))
print(("Percentage of fires not detected by MODIS Terra: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14Tcoun == 0)/len(cleanedfireperimDF))))
print("Number of fires not detected by MODIS Aqua: {}".format(sum(cleanedfireperimDF.MOD14Acoun == 0)))
print(("Percentage of fires not detected by MODIS Aqua: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14Acoun == 0)/len(cleanedfireperimDF))))
max(cleanedfireperimDF.MOD14count)
print("Number of undetected fires: {}".format(sum(cleanedfireperimDF.VIIRSIcoun == 0)))
print(("Percentage of undetected fires: {:.2f}".format(100*sum(cleanedfireperimDF.VIIRSIcoun == 0)/len(cleanedfireperimDF))))
print(("Acreage of largest undetected fire: {}".format(max(cleanedfireperimDF.CalcAcres[cleanedfireperimDF.VIIRSIcoun == 0]))))
max(cleanedfireperimDF.VIIRSIcoun)
mod14DF.groupby('infireperi').size()
mod14DF['infireperi'].value_counts()
viirsIDF['infireperi'].value_counts()
cleanedfireperimDF[cleanedfireperimDF.VIIRSIcoun == 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.VIIRSIcoun > 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14Acoun == 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14Acoun > 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14count == 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14count > 0]['CalcAcres'].describe()
cleanedfireperimDF[cleanedfireperimDF.MOD14count==max(cleanedfireperimDF.MOD14count)][maincols]
keepperimcols = list(cleanedfireperimDF.columns)
keepperimcols.remove('geometry')
cleanedfireperimDF.columns
cleanedfireperimDF_forstats = cleanedfireperimDF[keepperimcols].copy()
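# Fire duration in days, from discovery date (DiscDate) to out date (OutDate)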
cleanedfireperimDF['fireduration'] = cleanedfireperimDF.apply(
lambda row: (dt.datetime.strptime(row.OutDate, "%Y-%m-%d") - dt.datetime.strptime(row.DiscDate, "%Y-%m-%d")).days,
axis=1)
cleanedfireperimDF[cleanedfireperimDF['MOD14count'] > 0]['fireduration'].mean()
cleanedfireperimDF.groupby(["MgmOption"])["MOD14count"].mean()
cleanedfireperimDF.groupby(["MgmOption"])["CalcAcres"].mean()
cleanedfireperimDF['blacksprucefire'] = cleanedfireperimDF['PrimFuel'] == "Black Spruce"
displayperimcols = maincols + [u'VIIRSIcoun', u'MOD14count']
cleanedfireperimDF.columns
cleanedfireperimDF['Hectares'] = cleanedfireperimDF['CalcAcres'] / 2.47105
cleanedfireperimDF['sqkm'] = cleanedfireperimDF['Hectares'] / 100
import numpy as np
import statsmodels.formula.api as smf
from pandas.tools.plotting import plot_frame
keepperimcols = list(cleanedfireperimDF.columns)
keepperimcols.remove('geometry')
cleanedfireperimDF_forstats = cleanedfireperimDF[keepperimcols].copy()
type(cleanedfireperimDF_forstats)
cleanedfireperimDF['Hectares'].describe()
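# OLS regressions relating VIIRS and MODIS detection counts to each other and to fire size (acres, hectares, sq. km)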
results = smf.ols('VIIRSIcoun ~ MOD14count', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('MOD14count ~ CalcAcres', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('MOD14count ~ Hectares', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('MOD14count ~ sqkm', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('VIIRSIcoun ~ CalcAcres', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('VIIRSIcoun ~ Hectares', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results = smf.ols('VIIRSIcoun ~ sqkm', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results.params
results = smf.ols('VIIRSIcoun ~ MOD14count', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
sns.set_context("paper", font_scale=2)
ax1 = sns.lmplot(x="Hectares", y="VIIRSIcoun",
size=5,
aspect=1.5,
data=cleanedfireperimDF);
sns.lmplot(x="Hectares", y="MOD14count",
data=cleanedfireperimDF,
size=5,
aspect=1.5,
);
ax1 = sns.regplot(x="Hectares", y="MOD14count",
data=cleanedfireperimDF,
line_kws={'zorder': 1}
);sns.regplot(x="Hectares", y="VIIRSIcoun",
data=cleanedfireperimDF,
line_kws={'zorder': 1},
ax=ax1
);
sns.set_context("poster", font_scale=1.8)
palette = itertools.cycle(sns.color_palette())
xlim = (-22, 500)
ylim = (-22, 1500)
ax1 = plot_frame(cleanedfireperimDF, kind='scatter',
x="MOD14count",
y="VIIRSIcoun",
xlim=xlim,
ylim=ylim,
s=60,
marker='o',
c='Hectares',
cmap="gist_stern_r",
vmin=0,
vmax=23000,
colorbar=True,
legend=True,
figsize=(15, 12),
sharex=False,
zorder=2
)
sns.regplot(
x="MOD14count",
y="VIIRSIcoun",
data=cleanedfireperimDF,
ax=ax1,
color="grey",
scatter=False,
line_kws={'zorder': 1}
)
ax1.set_xlabel('MODIS detections')
ax1.set_ylabel('VIIRS I-band detections')
f = plt.gcf()
gisoutdir = '/Volumes/SCIENCE_mobile_Mac/Fire/DATA_BY_PROJECT/2015VIIRSMODIS/GISout/'
outfn = 'fig05.png'
f.savefig(os.path.join(gisoutdir, outfn), dpi=300, bbox_inches='tight')
outfn = 'fig05_LR.png'
f.savefig(os.path.join(gisoutdir, outfn), dpi=150, bbox_inches='tight')
sns.set_context("poster", font_scale=1.6)
plotpalette = sns.xkcd_palette(['denim blue', 'apple green', ])
palette1 = itertools.cycle(plotpalette)
xlim = (-100, 23000)
ylim = (-100, 1500)
palette0 = itertools.cycle(sns.color_palette())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(20, 8))
plot_frame(cleanedfireperimDF,
kind='scatter', x='Hectares', y="VIIRSIcoun", s=80, edgecolor='k', marker='D',
zorder=5, ax=ax0, label="VIIRS I-band", color=next(palette1))
plot_frame(cleanedfireperimDF,
kind='scatter', x='Hectares', y="MOD14count", s=80, edgecolor='k', marker='s',
zorder=5, ax=ax0, label="MODIS", color=next(palette1))
sns.regplot(x="Hectares", y="VIIRSIcoun",
data=cleanedfireperimDF,
line_kws={'zorder': 1},
color=next(palette1),
scatter=False,
ax=ax0
);
sns.regplot(x="Hectares", y="MOD14count",
data=cleanedfireperimDF,
line_kws={'zorder': 1},
color=next(palette1),
scatter=False,
ax=ax0
);
leg = ax0.legend(frameon=True)
leg.get_frame().set_facecolor('white')
ax0.set_xlabel('Hectares')
ax0.set_ylabel('Fire detections')
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax0.text(2000, 1300, 'a)')
#ax0.set_xticks([0, 25000, 50000, 75000, 100000, 125000])
# right
xlim = (-22, 500)
ylim = (-22, 1500)
plot_frame(cleanedfireperimDF, kind='scatter',
x="MOD14count",
y="VIIRSIcoun",
xlim=xlim,
ylim=ylim,
s=80,
edgecolor='k',
c='Hectares',
cmap="viridis_r",
vmin=0,
vmax=23000,
colorbar=True,
legend=True,
sharex=False,
zorder=2,
ax=ax1
)
sns.regplot(
x="MOD14count",
y="VIIRSIcoun",
data=cleanedfireperimDF,
ax=ax1,
color="grey",
scatter=False,
line_kws={'zorder': 1}
)
ax1.set_xlabel('MODIS detections')
ax1.set_ylabel('VIIRS I-band detections')
ax1.text(50, 1300, 'b)')
plt.tight_layout()
outfn = 'Fig05'
fig.savefig(os.path.join(gisoutdir, outfn + '_LR.png'), dpi=150, bbox_inches='tight')
from PIL import Image
from io import BytesIO
fig.savefig(os.path.join(gisoutdir, outfn + '.png'), dpi=300, bbox_inches='tight')
png = Image.open(os.path.join(gisoutdir, outfn + '.png'))
png.save(os.path.join(gisoutdir, outfn + '.tif'))
png.close()
| 0.184731 | 0.680374 |
# Example notebook showcasing the use of the PseudomonasDotCom Scraper as a programmable interface to the pseudomonas.com database
## List of contents
[Load required python modules](#Load-required-python-modules)
[Setting things up](#Setting-things-up)
[Retrieve the data](#Retrieve-the-data)
[Display the data](#Display-the-data)
[List what data is in the results](#List-all-keys-in-the-results-dict)
[Get the data for one queried gene](#Get-the-data-for-one-queried-gene)
[Display one table](#Display-one-table)
[Display a given table for all three genes](#Display-a-given-table-for-all-three-genes)
[Select all rows with a given value in one column](#Select-all-rows-with-a-given-value-in-one-column)
[Save to disk](#Save-to-disk)
[Read from disk](#Read-results-from-disk)
[Example with references](#Example-for-a-query-with-references)
[Display references](#Display-references-with-proper-html-links)
## Load required python modules
```
# The scraper
from GenDBScraper.PseudomonasDotComScraper import PseudomonasDotComScraper as scraper
# The query object (derived from collections.namedtuple)
from GenDBScraper.PseudomonasDotComScraper import pdc_query
# Regular expressions
import re
# pandas DataFrame, the workhorse datastructure
import pandas
```
## Setting things up
```
# We want to get data for three adjacent genes, pflu0915, pflu0916, pflu0917
queries = [pdc_query(strain='sbw25',feature=feat) for feat in ['pflu0915', 'pflu0916', 'pflu0917']]
# Set up the scraper
scraper = scraper(query=queries)
# Connect to the database
scraper.connect()
```
## Retrieve the data
```
results = scraper.run_query()
```
## Display the data
```
results
```
The results object is a two-level nested dictionary:
results
|
+---sbw25__pflu0915
|
+---Gene Feature Overview
+---Cross References
+---Orthologs/Comparative Genomics
.
.
.
+---sbw25__pflu0916
|
+---Gene Feature Overview
+---Cross References
+---Orthologs/Comparative Genomics
.
.
.
+---sbw25__pflu0917
|
+---Gene Feature Overview
+---Cross References
+---Orthologs/Comparative Genomics
.
.
.
The lowest level ("Gene Feature Overview", "Cross References",
etc.) holds the data tables downloaded from pseudomonas.com. They are
instances of the pandas.DataFrame class, a highly versatile data
structure that supports advanced dataset operations such as slicing,
selection by value or range, and much more.
### List all keys in the results dict
```
[k for k in results.keys()]
```
### Get the data for one queried gene
```
pflu0915_data = results['sbw25__pflu0915']
# List all keys in the first gene.
[k for k in pflu0915_data]
```
### Display one table
```
# Display the functional predictions from Interpro.
display(pflu0915_data['Functional Predictions from Interpro'])
```
### Display a given table for all three genes
```
# Display functional predictions from all three genes.
for f in results.keys():
print("\n\n")
print(f)
display(results[f]['Functional Predictions from Interpro'])
```
### Select all rows with a given value in one column
```
# Display all Pfam analysis
# Temporary list of data
tmp = []
# Iterate all three results.
for q,r in results.items():
# Take the functional predictions
f = r['Functional Predictions from Interpro']
# Select only rows where Analysis is "Pfam"
pfam = f[f['Analysis'] == 'Pfam']
# Add a column to denote the gene.
newcol = [q]*len(pfam)
pfam.insert(0, value=newcol, column="Feature")
# Append to the temporary holder.
tmp.append(pfam)
# Concatenate into one pandas DataFrame
tmp = pandas.concat(tmp)
display(tmp)
```
## Save to disk
```
scraper.to_json(results, outfile="sbw25.json")
```
## Read results from disk
```
loaded = scraper.from_json('sbw25.json')
loaded
```
## Example for a query with references
```
results_pa = scraper.run_query(query=pdc_query(strain='UCBPP-PA14', feature='PA14_67210'))
```
### Display references with proper html links
```
results_pa["UCBPP-PA14__PA14_67210"]['References'].style.format({'pubmed_url': lambda x: '<a href={0}>link</a>'.format(x)})
```
|
github_jupyter
|
# The scraper
from GenDBScraper.PseudomonasDotComScraper import PseudomonasDotComScraper as scraper
# The query object (derived from collections.namedtuple)
from GenDBScraper.PseudomonasDotComScraper import pdc_query
# Regular expressions
import re
# pandas DataFrame, the workhorse datastructure
import pandas
# We want to get data for three adjacent genes, pflu0915, pflu0916, pflu0917
queries = [pdc_query(strain='sbw25',feature=feat) for feat in ['pflu0915', 'pflu0916', 'pflu0917']]
# Set up the scraper
scraper = scraper(query=queries)
# Connect to the database
scraper.connect()
results = scraper.run_query()
results
[k for k in results.keys()]
pflu0915_data = results['sbw25__pflu0915']
# List all keys in the first gene.
[k for k in pflu0915_data]
# Display the functional predictions from Interpro.
display(pflu0915_data['Functional Predictions from Interpro'])
# Display functional predictions from all three genes.
for f in results.keys():
print("\n\n")
print(f)
display(results[f]['Functional Predictions from Interpro'])
# Display all Pfam analysis
# Temporary list of data
tmp = []
# Iterate all three results.
for q,r in results.items():
# Take the functional predictions
f = r['Functional Predictions from Interpro']
# Select only rows where Analysis is "Pfam"
pfam = f[f['Analysis'] == 'Pfam']
# Add a column to denote the gene.
newcol = [q]*len(pfam)
pfam.insert(0, value=newcol, column="Feature")
# Append to the temporary holder.
tmp.append(pfam)
# Concatenate into one pandas DataFrame
tmp = pandas.concat(tmp)
display(tmp)
scraper.to_json(results, outfile="sbw25.json")
loaded = scraper.from_json('sbw25.json')
loaded
results_pa = scraper.run_query(query=pdc_query(strain='UCBPP-PA14', feature='PA14_67210'))
results_pa["UCBPP-PA14__PA14_67210"]['References'].style.format({'pubmed_url': lambda x: '<a href={0}>link</a>'.format(x)})
| 0.491456 | 0.89096 |
## Import Libraries
```
import pandas as pd
from datetime import date
using_Google_colab = True
using_Anaconda_on_Mac_or_Linux = False
using_Anaconda_on_windows = False
if using_Google_colab:
from google.colab import drive
drive.mount('/content/drive')
```
## Data Prep for USA_Facts Confirmed Cases
Read file
```
if using_Google_colab:
df_confirmed_cases = pd.read_csv('/content/drive/MyDrive/COVID_Project/input/USA_Facts/covid_confirmed_usafacts.csv')
if using_Anaconda_on_Mac_or_Linux:
df_confirmed_cases = pd.read_csv('../input/USA_Facts/covid_confirmed_usafacts.csv')
if using_Anaconda_on_windows:
df_confirmed_cases = pd.read_csv(r'..\input\USA_Facts\covid_confirmed_usafacts.csv')
df_confirmed_cases = df_confirmed_cases.astype({'countyFIPS': str}).astype({'stateFIPS': str})
df_confirmed_cases
```
Now group the data by state
```
df_confirmed_cases_by_state = df_confirmed_cases.groupby(['State', 'stateFIPS']).sum().reset_index()
df_confirmed_cases_by_state
```
Now I need to transpose so all dates are in rows
```
df_confirmed_cases_by_state_by_date = df_confirmed_cases_by_state.melt(id_vars=['State','stateFIPS'],
var_name='Date', value_name='Total Cases')
df_confirmed_cases_by_state_by_date
```
What does a Python-created pivot table look like?
```
df_pivot_table = pd.pivot_table(df_confirmed_cases_by_state_by_date,
values='Total Cases',
columns='Date',
index=['State','stateFIPS'])
df_pivot_table
df_confirmed_cases_by_state_by_date = df_confirmed_cases_by_state_by_date.astype({'Date': 'datetime64[ns]'})
df_sorted_confirmed_cases = df_confirmed_cases_by_state_by_date.sort_values(by=['stateFIPS', 'Date'])
df_sorted_confirmed_cases
```
Now shift by one to get previous day cases and compute incremental cases
```
df_sorted_confirmed_cases['Incremental Cases'] = df_sorted_confirmed_cases.groupby('State')['Total Cases'].apply(
lambda x: x - x.shift(1))
df_sorted_confirmed_cases
```
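As an aside, the shift-and-subtract above can be written more concisely with pandas' `diff()`; a minimal sketch assuming the same DataFrame and column names:
```
# Equivalent to the shift-based calculation above (sketch, not used below)
df_sorted_confirmed_cases['Incremental Cases'] = (
    df_sorted_confirmed_cases.groupby('State')['Total Cases'].diff())
```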
Now compute the 7-day rolling average
```
df_sorted_confirmed_cases['cases moving_avg'] = df_sorted_confirmed_cases.groupby('State')['Incremental Cases'].apply(
lambda x: (x + x.shift(1) + x.shift(2) + x.shift(3) + x.shift(4) + x.shift(5) + x.shift(6))/7)
if using_Google_colab:
df_sorted_confirmed_cases.to_csv('/content/drive/MyDrive/COVID_Project/output/confirmed_cases_by_state.csv')
if using_Anaconda_on_Mac_or_Linux:
df_sorted_confirmed_cases.to_csv('../output/confirmed_cases_by_state.csv')
if using_Anaconda_on_windows:
df_sorted_confirmed_cases.to_csv(r'..\output\confirmed_cases_by_state.csv')
```
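The seven-term sum above is simply a 7-day rolling mean, which pandas can compute directly; a minimal sketch under the same assumptions:
```
# Equivalent 7-day rolling average of daily new cases (sketch, not used below)
df_sorted_confirmed_cases['cases moving_avg'] = (
    df_sorted_confirmed_cases.groupby('State')['Incremental Cases']
    .transform(lambda s: s.rolling(7).mean()))
```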
## Data Prep for Deaths
Read file
```
if using_Google_colab:
df_covid_deaths = pd.read_csv('/content/drive/MyDrive/COVID_Project/input/USA_Facts/covid_deaths_usafacts.csv')
if using_Anaconda_on_Mac_or_Linux:
df_covid_deaths = pd.read_csv('../input/USA_Facts/covid_deaths_usafacts.csv')
if using_Anaconda_on_windows:
df_covid_deaths = pd.read_csv(r'..\input\USA_Facts\covid_deaths_usafacts.csv')
df_covid_deaths = df_covid_deaths.astype({'countyFIPS': str}).astype({'stateFIPS': str})
```
Now group the data by State
```
df_covid_deaths_by_state = df_covid_deaths.groupby(['State', 'stateFIPS']).sum().reset_index()
df_covid_deaths_by_state
```
Now I need to transpose so all dates are in rows
```
df_covid_deaths = df_covid_deaths_by_state.melt(id_vars=['State','stateFIPS'],
var_name='Date',
value_name='Total Deaths')
df_covid_deaths = df_covid_deaths.astype({'Date': 'datetime64[ns]'})
df_sorted_covid_deaths = df_covid_deaths.sort_values(by=['stateFIPS', 'Date'])
df_sorted_covid_deaths
```
Now shift by one to get previous day deaths and compute incremental deaths
```
df_sorted_covid_deaths['Incremental Deaths'] = df_sorted_covid_deaths.groupby('State')['Total Deaths'].apply(
lambda x: x - x.shift(1))
df_sorted_covid_deaths
```
Now compute the 7-day rolling average
```
df_sorted_covid_deaths['death moving_avg'] = df_sorted_covid_deaths.groupby('State')['Incremental Deaths'].apply(
lambda x: (x + x.shift(1) + x.shift(2) + x.shift(3) + x.shift(4) + x.shift(5) + x.shift(6))/7)
if using_Google_colab:
df_sorted_covid_deaths.to_csv('/content/drive/MyDrive/COVID_Project/output/covid_deaths_by_state.csv')
if using_Anaconda_on_Mac_or_Linux:
df_sorted_covid_deaths.to_csv('../output/covid_deaths_by_state.csv')
if using_Anaconda_on_windows:
df_sorted_covid_deaths.to_csv(r'..\output\covid_deaths_by_state.csv')
```
## Create partial analytics_base_table with confirmed cases and deaths
```
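# Merge note: overlapping columns from the right frame get the '_DROP' suffix,
# and the regex filter below keeps only columns whose names do not contain
# '_DROP', i.e. the duplicated right-hand columns are discarded after the merge.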
df_abt_by_state = pd.merge(df_sorted_confirmed_cases, df_sorted_covid_deaths,
on=['stateFIPS', 'Date'],
suffixes=('', '_DROP'),
how='inner').filter(regex='^(?!.*_DROP)')
df_abt_by_state
if using_Google_colab:
df_abt_by_state.to_csv('/content/drive/MyDrive/COVID_Project/output/abt_by_state.csv')
if using_Anaconda_on_Mac_or_Linux:
df_abt_by_state.to_csv('../output/abt_by_state.csv')
if using_Anaconda_on_windows:
df_abt_by_state.to_csv(r'..\output\abt_by_state.csv')
```
|
github_jupyter
|
import pandas as pd
from datetime import date
using_Google_colab = True
using_Anaconda_on_Mac_or_Linux = False
using_Anaconda_on_windows = False
if using_Google_colab:
from google.colab import drive
drive.mount('/content/drive')
if using_Google_colab:
df_confirmed_cases = pd.read_csv('/content/drive/MyDrive/COVID_Project/input/USA_Facts/covid_confirmed_usafacts.csv')
if using_Anaconda_on_Mac_or_Linux:
df_confirmed_cases = pd.read_csv('../input/USA_Facts/covid_confirmed_usafacts.csv')
if using_Anaconda_on_windows:
df_confirmed_cases = pd.read_csv(r'..\input\USA_Facts\covid_confirmed_usafacts.csv')
df_confirmed_cases = df_confirmed_cases.astype({'countyFIPS': str}).astype({'stateFIPS': str})
df_confirmed_cases
df_confirmed_cases_by_state = df_confirmed_cases.groupby(['State', 'stateFIPS']).sum().reset_index()
df_confirmed_cases_by_state
df_confirmed_cases_by_state_by_date = df_confirmed_cases_by_state.melt(id_vars=['State','stateFIPS'],
var_name='Date', value_name='Total Cases')
df_confirmed_cases_by_state_by_date
df_pivot_table = pd.pivot_table(df_confirmed_cases_by_state_by_date,
values='Total Cases',
columns='Date',
index=['State','stateFIPS'])
df_pivot_table
df_confirmed_cases_by_state_by_date = df_confirmed_cases_by_state_by_date.astype({'Date': 'datetime64[ns]'})
df_sorted_confirmed_cases = df_confirmed_cases_by_state_by_date.sort_values(by=['stateFIPS', 'Date'])
df_sorted_confirmed_cases
df_sorted_confirmed_cases['Incremental Cases'] = df_sorted_confirmed_cases.groupby('State')['Total Cases'].apply(
lambda x: x - x.shift(1))
df_sorted_confirmed_cases
df_sorted_confirmed_cases['cases moving_avg'] = df_sorted_confirmed_cases.groupby('State')['Incremental Cases'].apply(
lambda x: (x + x.shift(1) + x.shift(2) + x.shift(3) + x.shift(4) + x.shift(5) + x.shift(6))/7)
if using_Google_colab:
df_sorted_confirmed_cases.to_csv('/content/drive/MyDrive/COVID_Project/output/confirmed_cases_by_state.csv')
if using_Anaconda_on_Mac_or_Linux:
df_sorted_confirmed_cases.to_csv('../output/confirmed_cases_by_state.csv')
if using_Anaconda_on_windows:
df_sorted_confirmed_cases.to_csv(r'..\output\confirmed_cases_by_state.csv')
if using_Google_colab:
df_covid_deaths = pd.read_csv('/content/drive/MyDrive/COVID_Project/input/USA_Facts/covid_deaths_usafacts.csv')
if using_Anaconda_on_Mac_or_Linux:
df_covid_deaths = pd.read_csv('../input/USA_Facts/covid_deaths_usafacts.csv')
if using_Anaconda_on_windows:
df_covid_deaths = pd.read_csv(r'..\input\USA_Facts\covid_deaths_usafacts.csv')
df_covid_deaths = df_covid_deaths.astype({'countyFIPS': str}).astype({'stateFIPS': str})
df_covid_deaths_by_state = df_covid_deaths.groupby(['State', 'stateFIPS']).sum().reset_index()
df_covid_deaths_by_state
df_covid_deaths = df_covid_deaths_by_state.melt(id_vars=['State','stateFIPS'],
var_name='Date',
value_name='Total Deaths')
df_covid_deaths = df_covid_deaths.astype({'Date': 'datetime64[ns]'})
df_sorted_covid_deaths = df_covid_deaths.sort_values(by=['stateFIPS', 'Date'])
df_sorted_covid_deaths
df_sorted_covid_deaths['Incremental Deaths'] = df_sorted_covid_deaths.groupby('State')['Total Deaths'].apply(
lambda x: x - x.shift(1))
df_sorted_covid_deaths
df_sorted_covid_deaths['death moving_avg'] = df_sorted_covid_deaths.groupby('State')['Incremental Deaths'].apply(
lambda x: (x + x.shift(1) + x.shift(2) + x.shift(3) + x.shift(4) + x.shift(5) + x.shift(6))/7)
if using_Google_colab:
df_sorted_covid_deaths.to_csv('/content/drive/MyDrive/COVID_Project/output/covid_deaths_by_state.csv')
if using_Anaconda_on_Mac_or_Linux:
df_sorted_covid_deaths.to_csv('../output/covid_deaths_by_state.csv')
if using_Anaconda_on_windows:
df_sorted_covid_deaths.to_csv(r'..\output\covid_deaths_by_state.csv')
df_abt_by_state = pd.merge(df_sorted_confirmed_cases, df_sorted_covid_deaths,
on=['stateFIPS', 'Date'],
suffixes=('', '_DROP'),
how='inner').filter(regex='^(?!.*_DROP)')
df_abt_by_state
if using_Google_colab:
df_abt_by_state.to_csv('/content/drive/MyDrive/COVID_Project/output/abt_by_state.csv')
if using_Anaconda_on_Mac_or_Linux:
df_abt_by_state.to_csv('../output/abt_by_state.csv')
if using_Anaconda_on_windows:
df_abt_by_state.to_csv(r'..\output\abt_by_state.csv')
| 0.13319 | 0.562116 |
# Doublespike Correction
Isopy contain the function ``ds_inversion`` that will perform the double spike inversion
```
import isopy
import numpy as np
import matplotlib.pyplot as plt
spike = isopy.array([0, 1, 1, 0], ['104pd', '106pd', '108pd', '110pd'])
sample = isopy.tb.make_ms_sample('pd', spike=spike, fnat = 0.1, fins=1.6)
result = isopy.tb.ds_inversion(sample, spike)
isopy.tb.plot_hstack(plt, result.fnat, figure_width=12, compare=True, subplots_grid=(-1, 3))
np.mean(result) #we can use array functions on the result object
isopy.sd2(result)
```
The function ``ds_Delta`` will calculate the fractionated value of a mass ratio from a natural fractionation factor
```
isopy.tb.ds_Delta('pd108/pd105', result, factor=1) #It will automatically get *fnat* from a DSResult object.
```
### Doublespike Interference correction
Isopy also contains the function ``ds_correction`` that will apply an iterative correction for isobaric interferences.
You can have a look at the [source code](https://isopy.readthedocs.io/en/latest/_modules/isopy/toolbox/doublespike.html#ds_correction) to see how this is implemented.
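The general shape of such a correction is a loop that alternates interference stripping and inversion until the instrumental fractionation factor stops changing. The sketch below is illustrative only; it is not isopy's code, and `inversion` and `interference_correction` are placeholder callables:
```
# Schematic sketch of an iterative interference correction around a double
# spike inversion. NOT isopy's implementation; `inversion` and
# `interference_correction` are user-supplied placeholder callables.
def iterative_ds_correction(measured, inversion, interference_correction,
                            tol=1e-10, max_iter=50):
    fins = 0.0
    for _ in range(max_iter):
        # strip isobaric interferences (e.g. estimate 104Ru on 104Pd from 101Ru)
        corrected = interference_correction(measured, fins)
        # re-run the double spike inversion on the corrected beams
        new_fins = inversion(corrected)
        if abs(new_fins - fins) < tol:
            return new_fins
        fins = new_fins
    raise RuntimeError("interference correction did not converge")
```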
```
sample2 = isopy.tb.make_ms_sample('pd', spike=spike, fnat = 0.1, fins=1.6, ru101=0.1)
result = isopy.tb.ds_correction(sample2, spike)
np.mean(result)
```
For reference this is the result without the interference correction
```
result = isopy.tb.ds_inversion(sample2, spike)
np.mean(result)
```
### Comparing Rudge and Siebert methods
All double spike functions allow you to specify the method used for the inversion.
Your choices are ``"rudge"`` ([Rudge et al. 2009](https://doi.org/10.1016/j.chemgeo.2009.05.010)) and ``"siebert"`` ([Siebert et al. 2001](https://doi.org/10.1029/2000GC000124)).
As the examples below show, there is virtually no difference in the results between the two methods. The ``"siebert"`` method is faster; however, it will occasionally fail at extreme spike/sample fractions (see Finding the Optimal Doublespike). The default method of both ``ds_inversion`` and ``ds_correction`` is ``"rudge"``.
```
sample = isopy.tb.make_ms_sample('pd', spike=spike, fnat = 0.1, fins=1.6)
%timeit result = isopy.tb.ds_inversion(sample, spike, method='rudge') #Default method
np.mean(isopy.tb.ds_inversion(sample, spike, method='rudge'))
%timeit result = isopy.tb.ds_inversion(sample, spike, method='siebert')
np.mean(isopy.tb.ds_inversion(sample, spike, method='siebert'))
```
## Example workflow
Below is a short example of a complete workflow: synthesising spiked analyses, averaging the blank, running the inversion with outlier rejection, and calculating delta values.
```
import isopy
import numpy as np
import matplotlib.pyplot as plt
```
Synthesise some analyses
```
spike = isopy.array(pd104=0, pd106=1, pd108=1, pd110=0)
standard = isopy.tb.make_ms_array('pd')
blank_compostion = {'pd': 1, 'ru101': 0.0001}
blank = isopy.tb.make_ms_sample(blank_compostion, maxv=0.1, fins=1.6, ru101=0.0001)
bracket1 = isopy.tb.make_ms_sample(standard, spike = spike, maxv=10, fnat=-0.09, fins=1.5, ru101=0.0001)
blank = isopy.tb.make_ms_sample(blank_compostion, maxv=0.1, fins=1.6, ru101=0.0001)
sample = isopy.tb.make_ms_sample(standard, blank=blank_compostion, blank_maxv = 0.1, spike = spike, maxv=10, fins=1.6, fnat=0.1, ru101=0.01)
bracket2 = isopy.tb.make_ms_sample(standard, spike = spike, maxv=10, fnat=-0.11, fins=1.7, ru101=0.0001)
```
Function that calculates the average blank value
```
def process_blank(blank, plot = True):
ratio = blank.ratio() #This will create a ratio against column with the largest value
outliers = isopy.tb.find_outliers(blank, axis=1)
accepted = np.invert(outliers)
blank_avg = np.mean(blank[accepted])
if plot:
isopy.tb.plot_hstack(plt, blank, outliers=outliers, color=isopy.tb.ColorPairs[0], compare=True, figure_height=6, figure_width=8)
return blank_avg
```
Function that performs the inversion
```
def process_sample(*samples, plot=True):
colors = isopy.tb.ColorPairs()
results = []
if plot:
figure = isopy.tb.update_figure(plt, height=10, width=8)
for sample in samples:
result = isopy.tb.ds_correction(sample, spike, standard)
outliers = isopy.tb.find_outliers(result.fnat)
accepted = np.invert(outliers)
if plot:
isopy.tb.plot_hstack(plt, result, outliers=outliers, color=colors.current, cval=True, pmval=True)
colors.next()
results.append(result[accepted])
if plot:
isopy.tb.plot_hcompare(figure)
if len(results) == 1:
return results[0]
else:
return tuple(results)
```
Process our samples
```
blank_avg = process_blank(blank)
bracket1_result, bracket2_result = process_sample(bracket1, bracket2)
sample_result = process_sample(sample-blank_avg)
delta_85 = isopy.tb.ds_Delta.delta('pd108/pd105', sample_result, bracket1_result, bracket2_result)
print(np.mean(delta_85), isopy.sd2(delta_85))
delta_85 = isopy.tb.ds_Delta.delta('pd108/pd105', sample_result)
print(np.mean(delta_85), isopy.sd2(delta_85))
```
|
github_jupyter
|
import isopy
import numpy as np
import matplotlib.pyplot as plt
spike = isopy.array([0, 1, 1, 0], ['104pd', '106pd', '108pd', '110pd'])
sample = isopy.tb.make_ms_sample('pd', spike=spike, fnat = 0.1, fins=1.6)
result = isopy.tb.ds_inversion(sample, spike)
isopy.tb.plot_hstack(plt, result.fnat, figure_width=12, compare=True, subplots_grid=(-1, 3))
np.mean(result) #we can use array functions on the result object
isopy.sd2(result)
isopy.tb.ds_Delta('pd108/pd105', result, factor=1) #It will automatically get *fnat* from a DSResult object.
sample2 = isopy.tb.make_ms_sample('pd', spike=spike, fnat = 0.1, fins=1.6, ru101=0.1)
result = isopy.tb.ds_correction(sample2, spike)
np.mean(result)
result = isopy.tb.ds_inversion(sample2, spike)
np.mean(result)
sample = isopy.tb.make_ms_sample('pd', spike=spike, fnat = 0.1, fins=1.6)
%timeit result = isopy.tb.ds_inversion(sample, spike, method='rudge') #Default method
np.mean(isopy.tb.ds_inversion(sample, spike, method='rudge'))
%timeit result = isopy.tb.ds_inversion(sample, spike, method='siebert')
np.mean(isopy.tb.ds_inversion(sample, spike, method='siebert'))
import isopy
import numpy as np
import matplotlib.pyplot as plt
spike = isopy.array(pd104=0, pd106=1, pd108=1, pd110=0)
standard = isopy.tb.make_ms_array('pd')
blank_compostion = {'pd': 1, 'ru101': 0.0001}
blank = isopy.tb.make_ms_sample(blank_compostion, maxv=0.1, fins=1.6, ru101=0.0001)
bracket1 = isopy.tb.make_ms_sample(standard, spike = spike, maxv=10, fnat=-0.09, fins=1.5, ru101=0.0001)
blank = isopy.tb.make_ms_sample(blank_compostion, maxv=0.1, fins=1.6, ru101=0.0001)
sample = isopy.tb.make_ms_sample(standard, blank=blank_compostion, blank_maxv = 0.1, spike = spike, maxv=10, fins=1.6, fnat=0.1, ru101=0.01)
bracket2 = isopy.tb.make_ms_sample(standard, spike = spike, maxv=10, fnat=-0.11, fins=1.7, ru101=0.0001)
def process_blank(blank, plot = True):
ratio = blank.ratio() #This will create a ratio against column with the largest value
outliers = isopy.tb.find_outliers(blank, axis=1)
accepted = np.invert(outliers)
blank_avg = np.mean(blank[accepted])
if plot:
isopy.tb.plot_hstack(plt, blank, outliers=outliers, color=isopy.tb.ColorPairs[0], compare=True, figure_height=6, figure_width=8)
return blank_avg
def process_sample(*samples, plot=True):
colors = isopy.tb.ColorPairs()
results = []
if plot:
figure = isopy.tb.update_figure(plt, height=10, width=8)
for sample in samples:
result = isopy.tb.ds_correction(sample, spike, standard)
outliers = isopy.tb.find_outliers(result.fnat)
accepted = np.invert(outliers)
if plot:
isopy.tb.plot_hstack(plt, result, outliers=outliers, color=colors.current, cval=True, pmval=True)
colors.next()
results.append(result[accepted])
if plot:
isopy.tb.plot_hcompare(figure)
if len(results) == 1:
return results[0]
else:
return tuple(results)
blank_avg = process_blank(blank)
bracket1_result, bracket2_result = process_sample(bracket1, bracket2)
sample_result = process_sample(sample-blank_avg)
delta_85 = isopy.tb.ds_Delta.delta('pd108/pd105', sample_result, bracket1_result, bracket2_result)
print(np.mean(delta_85), isopy.sd2(delta_85))
delta_85 = isopy.tb.ds_Delta.delta('pd108/pd105', sample_result)
print(np.mean(delta_85), isopy.sd2(delta_85))
| 0.449393 | 0.955734 |
```
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import roc_auc_score
import seaborn as sbn
%matplotlib inline
data = pd.read_csv('dataset/train.csv')
data.head()
target = data.pop('Survived')
sbn.barplot(x=data['Sex'], y=target, data=data);
sbn.barplot(x=data['Pclass'], y=target, data=data);
sbn.barplot(x=data['Sex'], y=target, hue=data['Pclass'], data=data);
sbn.barplot(x=data['Pclass'], y=target, hue=data['Sex'], data=data)
data.info()
```
Age, Cabin, and Embarked show missing data.
Find the missing values and replace them.
```
data['Age'].value_counts(dropna=False)
# Replace missing value with it's mean
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].value_counts(dropna=False)
# Replace missing value with Mode
data['Embarked'].fillna('S', inplace=True)
def clean_cabin(x):
try:
return x[0]
except TypeError:
return 'None'
data['Cabin'] = data['Cabin'].apply(clean_cabin)
data['Cabin'].head()
data['Cabin'].value_counts(dropna=False)
data.info()
data.head()
del data['PassengerId']
# convert sex categorical to numeric
data['Sex'].replace(['female','male'],[0,1],inplace=True)
data.head()
#x.fillna(0)
data['Cabin'].value_counts(dropna=False)
def age_band(dl):
if dl <= 10.0 : return 'Age_band_1'
elif 10.0 < dl <= 20.0 : return 'Age_band_2'
elif 20.0 < dl <= 30.0 : return 'Age_band_3'
elif 30.0 < dl <= 40.0 : return 'Age_band_4'
elif 40.0 < dl <= 50.0 : return 'Age_band_5'
elif 50.0 < dl <= 60.0 : return 'Age_band_6'
elif 60.0 < dl <= 70.0 : return 'Age_band_7'
elif 70.0 < dl <= 80.0 : return 'Age_band_8'
elif 80.0 < dl <= 90.0 : return 'Age_band_9'
elif 90.0 < dl <= 100.0 : return 'Age_band_10'
#else: return 'None'
data['Age'] = data['Age'].map(age_band)
data['Age'].value_counts(dropna=False)
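# Note: the same age bands could be produced more concisely with pd.cut
# (a sketch, not applied here since 'Age' has already been mapped above):
# data['Age'] = pd.cut(data['Age'], bins=range(0, 101, 10),
#                      labels=['Age_band_' + str(i) for i in range(1, 11)])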
# create dummy variables for each categorical variable
categorical_variable = ['Age','Pclass','Cabin','Embarked']
for variable in categorical_variable:
    # fill missing values with the 'Missing' label
    data[variable].fillna('Missing', inplace=True)
    # create an array of dummy columns
    dummies = pd.get_dummies(data[variable], prefix=variable)
    # update data with the dummies and drop the original variable
    data= pd.concat([data, dummies], axis=1)
    data.drop([variable], axis=1, inplace=True)
data.head()
del data['Cabin_T']
del data['Ticket']
del data['Name']
data.shape
data.columns
model = RandomForestRegressor(n_estimators=100, oob_score=True, n_jobs=-1, random_state=42)
model.fit(data,target)
print('C-stat', roc_auc_score(target, model.oob_prediction_))
model.feature_importances_
srt =pd.Series(model.feature_importances_,index=data.columns)
type(srt)
srt = srt.sort_values(axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
#feature = pd.Series(model.feature_importances_, index=x.columns)
#feature.sort()
srt.plot(kind='barh', figsize=(7,6))
%%timeit
model = RandomForestRegressor(1000, oob_score=True, n_jobs=1, random_state=42)
model.fit(data,target)
%%timeit
model = RandomForestRegressor(1000, oob_score=True, n_jobs=-1, random_state=42)
model.fit(data,target)
# n_estimator
result = []
n_estimator_option = [30, 50, 100, 200, 500, 1000, 2000]
for trees in n_estimator_option:
model = RandomForestRegressor(trees, oob_score=True, n_jobs=-1, random_state=42)
model.fit(data,target)
print(trees, 'Trees')
    roc = roc_auc_score(target, model.oob_prediction_)
print('C-stat', roc)
result.append(roc)
print(" ")
pd.Series(result, n_estimator_option).plot()
# Max_features
result = []
max_feature_option = ['auto', None, 'sqrt', 'log2', 0.9,0.2]
for max_features in max_feature_option:
model = RandomForestRegressor(n_estimators=1000, oob_score=True, n_jobs=-1, random_state=42, max_features=max_features)
model.fit(data,target)
print(max_features, 'option')
    roc = roc_auc_score(target, model.oob_prediction_)
print('C-stat', roc)
result.append(roc)
print(' ')
pd.Series(result, max_feature_option).plot(kind="barh", xlim=(.85,.88))
# min_sample_leaf
result =[]
min_sample_leaf_option = [1,2,3,4,5,6,7,8,9,10]
for min_samples in min_sample_leaf_option:
model = RandomForestRegressor(n_estimators=1000,
oob_score=True,
n_jobs= -1,
random_state= 42,
max_features='auto',
min_samples_leaf=min_samples)
model.fit(data,target)
print(min_samples, 'min samples')
    roc = roc_auc_score(target, model.oob_prediction_)
print('C-stat', roc)
result.append(roc)
print(' ')
pd.Series(result, min_sample_leaf_option).plot()
# final model we find all best values
model = RandomForestRegressor(n_estimators=1000,
oob_score=True,
n_jobs=-1,
random_state=42,
max_features='auto',
min_samples_leaf=6)
model.fit(data,target)
roc= roc_auc_score(target, model.oob_prediction_)
print('C-stat : ', roc)
result= model.fit(data,target)
result
```
# Load Test Dataset
```
test_data = pd.read_csv('dataset/test.csv')
test_data.head()
test_data.info()
test_data['Sex'].replace(['female','male'],[0,1],inplace=True)
PassengerId = test_data[['PassengerId']]
del test_data['Name']
del test_data['Ticket']
del test_data['PassengerId']
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data.info()
def clean_cabin(x):
try:
return x[0]
except TypeError:
return 'None'
test_data['Cabin'] = test_data['Cabin'].apply(clean_cabin)
test_data['Cabin'].value_counts()
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data.info()
test_data['Age'] = test_data['Age'].map(age_band)
test_data['Age'].value_counts(dropna=False)
# create dummy variables for each categorical variable
categorical_variable2 = ['Age','Pclass','Cabin','Embarked']
for variable2 in categorical_variable2:
    # fill missing values with the 'Missing' label
    test_data[variable2].fillna('Missing', inplace=True)
    # create an array of dummy columns
    dummies = pd.get_dummies(test_data[variable2], prefix=variable2)
    # update test_data with the dummies and drop the original variable
    test_data= pd.concat([test_data, dummies], axis=1)
    test_data.drop([variable2], axis=1, inplace=True)
test_data.head()
data.head()
# model.predict(test_data)
Survived= model.predict(test_data)
type(test_data)
type(Survived)
Survived = np.where(Survived > 0.5, 1, 0)
Survived
Survived = pd.DataFrame(Survived)
Survived=Survived.rename(columns = {0:'Survived'})
#type(Survived)
Survived.head()
final = pd.concat([test_data,Survived], axis=1)
final.head()
final = pd.concat([test_data,Survived], axis=1)
# result for kaggle
kaggle = pd.concat([PassengerId, Survived], axis=1)
kaggle.head()
kaggle.describe()
kaggle['Survived'].value_counts(dropna=False).head()
pd.DataFrame.to_csv(kaggle, path_or_buf='Solution_7.csv',index=False)
```
|
github_jupyter
|
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import roc_auc_score
import seaborn as sbn
%matplotlib inline
data = pd.read_csv('dataset/train.csv')
data.head()
target = data.pop('Survived')
sbn.barplot(x=data['Sex'], y=target, data=data);
sbn.barplot(x=data['Pclass'], y=target, data=data);
sbn.barplot(x=data['Sex'], y=target, hue=data['Pclass'], data=data);
sbn.barplot(x=data['Pclass'], y=target, hue=data['Sex'], data=data)
data.info()
data['Age'].value_counts(dropna=False)
# Replace missing value with it's mean
data['Age'].fillna(data['Age'].mean(), inplace=True)
data['Embarked'].value_counts(dropna=False)
# Replace missing value with Mode
data['Embarked'].fillna('S', inplace=True)
def clean_cabin(x):
try:
return x[0]
except TypeError:
return 'None'
data['Cabin'] = data['Cabin'].apply(clean_cabin)
data['Cabin'].head()
data['Cabin'].value_counts(dropna=False)
data.info()
data.head()
del data['PassengerId']
# convert sex categorical to numeric
data['Sex'].replace(['female','male'],[0,1],inplace=True)
data.head()
#x.fillna(0)
data['Cabin'].value_counts(dropna=False)
def age_band(dl):
if dl <= 10.0 : return 'Age_band_1'
elif 10.0 < dl <= 20.0 : return 'Age_band_2'
elif 20.0 < dl <= 30.0 : return 'Age_band_3'
elif 30.0 < dl <= 40.0 : return 'Age_band_4'
elif 40.0 < dl <= 50.0 : return 'Age_band_5'
elif 50.0 < dl <= 60.0 : return 'Age_band_6'
elif 60.0 < dl <= 70.0 : return 'Age_band_7'
elif 70.0 < dl <= 80.0 : return 'Age_band_8'
elif 80.0 < dl <= 90.0 : return 'Age_band_9'
elif 90.0 < dl <= 100.0 : return 'Age_band_10'
#else: return 'None'
data['Age'] = data['Age'].map(age_band)
data['Age'].value_counts(dropna=False)
# create dummy variables for each categorical variable
categorical_variable = ['Age','Pclass','Cabin','Embarked']
for variable in categorical_variable:
    # fill missing values with the 'Missing' label
    data[variable].fillna('Missing', inplace=True)
    # create an array of dummy columns
    dummies = pd.get_dummies(data[variable], prefix=variable)
    # update data with the dummies and drop the original variable
    data= pd.concat([data, dummies], axis=1)
    data.drop([variable], axis=1, inplace=True)
data.head()
del data['Cabin_T']
del data['Ticket']
del data['Name']
data.shape
data.columns
model = RandomForestRegressor(n_estimators=100, oob_score=True, n_jobs=-1, random_state=42)
model.fit(data,target)
print('C-stat', roc_auc_score(target, model.oob_prediction_))
model.feature_importances_
srt =pd.Series(model.feature_importances_,index=data.columns)
type(srt)
srt = srt.sort_values(axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')
#feature = pd.Series(model.feature_importances_, index=x.columns)
#feature.sort()
srt.plot(kind='barh', figsize=(7,6))
%%timeit
model = RandomForestRegressor(1000, oob_score=True, n_jobs=1, random_state=42)
model.fit(data,target)
%%timeit
model = RandomForestRegressor(1000, oob_score=True, n_jobs=-1, random_state=42)
model.fit(data,target)
# n_estimator
result = []
n_estimator_option = [30, 50, 100, 200, 500, 1000, 2000]
for trees in n_estimator_option:
model = RandomForestRegressor(trees, oob_score=True, n_jobs=-1, random_state=42)
model.fit(data,target)
print(trees, 'Trees')
    roc = roc_auc_score(target, model.oob_prediction_)
print('C-stat', roc)
result.append(roc)
print(" ")
pd.Series(result, n_estimator_option).plot()
# Max_features
result = []
max_feature_option = ['auto', None, 'sqrt', 'log2', 0.9,0.2]
for max_features in max_feature_option:
model = RandomForestRegressor(n_estimators=1000, oob_score=True, n_jobs=-1, random_state=42, max_features=max_features)
model.fit(data,target)
print(max_features, 'option')
    roc = roc_auc_score(target, model.oob_prediction_)
print('C-stat', roc)
result.append(roc)
print(' ')
pd.Series(result, max_feature_option).plot(kind="barh", xlim=(.85,.88))
# min_sample_leaf
result =[]
min_sample_leaf_option = [1,2,3,4,5,6,7,8,9,10]
for min_samples in min_sample_leaf_option:
model = RandomForestRegressor(n_estimators=1000,
oob_score=True,
n_jobs= -1,
random_state= 42,
max_features='auto',
min_samples_leaf=min_samples)
model.fit(data,target)
print(min_samples, 'min samples')
    roc = roc_auc_score(target, model.oob_prediction_)
print('C-stat', roc)
result.append(roc)
print(' ')
pd.Series(result, min_sample_leaf_option).plot()
# final model we find all best values
model = RandomForestRegressor(n_estimators=1000,
oob_score=True,
n_jobs=-1,
random_state=42,
max_features='auto',
min_samples_leaf=6)
model.fit(data,target)
roc= roc_auc_score(target, model.oob_prediction_)
print('C-stat : ', roc)
result= model.fit(data,target)
result
test_data = pd.read_csv('dataset/test.csv')
test_data.head()
test_data.info()
test_data['Sex'].replace(['female','male'],[0,1],inplace=True)
PassengerId = test_data[['PassengerId']]
del test_data['Name']
del test_data['Ticket']
del test_data['PassengerId']
test_data['Age'].fillna(test_data['Age'].mean(), inplace=True)
test_data.info()
def clean_cabin(x):
try:
return x[0]
except TypeError:
return 'None'
test_data['Cabin'] = test_data['Cabin'].apply(clean_cabin)
test_data['Cabin'].value_counts()
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
test_data.info()
test_data['Age'] = test_data['Age'].map(age_band)
test_data['Age'].value_counts(dropna=False)
# create dummy variables for each categorical variable
categorical_variable2 = ['Age','Pclass','Cabin','Embarked']
for variable2 in categorical_variable2:
    # fill missing values with the 'Missing' label
    test_data[variable2].fillna('Missing', inplace=True)
    # create an array of dummy columns
    dummies = pd.get_dummies(test_data[variable2], prefix=variable2)
    # update test_data with the dummies and drop the original variable
    test_data= pd.concat([test_data, dummies], axis=1)
    test_data.drop([variable2], axis=1, inplace=True)
test_data.head()
data.head()
# model.predict(test_data)
Survived= model.predict(test_data)
type(test_data)
type(Survived)
Survived = np.where(Survived > 0.5, 1, 0)
Survived
Survived = pd.DataFrame(Survived)
Survived=Survived.rename(columns = {0:'Survived'})
#type(Survived)
Survived.head()
final = pd.concat([test_data,Survived], axis=1)
final.head()
final = pd.concat([test_data,Survived], axis=1)
# result for kaggle
kaggle = pd.concat([PassengerId, Survived], axis=1)
kaggle.head()
kaggle.describe()
kaggle['Survived'].value_counts(dropna=False).head()
pd.DataFrame.to_csv(kaggle, path_or_buf='Solution_7.csv',index=False)
| 0.234319 | 0.741124 |
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
```
## Player Count
* Display the total number of players
```
pd.DataFrame({'Total Players':[purchase_data.SN.nunique()]})
```
## Purchasing Analysis (Total)
* Run basic calculations to obtain number of unique items, average price, etc.
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
pd.options.display.float_format = '${:,.2f}'.format
purchase_data.head()
pd.DataFrame({'Number of Unique Items': [purchase_data['Item ID'].nunique()],\
'Average Price':[purchase_data.Price.mean()],\
'Number of Purchases':[purchase_data.Price.count()],
'Total Revenue':[purchase_data.Price.sum()]})
```
## Gender Demographics
* Percentage and Count of Male Players
* Percentage and Count of Female Players
* Percentage and Count of Other / Non-Disclosed
```
import pandas as pd
# File to Load (Remember to Change These)
file_to_upload = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
df = pd.read_csv(file_to_upload)
groupby_var= purchase_data.groupby("Gender")
count_var= groupby_var["SN"].nunique()
total_players=int(df["SN"].nunique())
percent_total=(count_var/total_players)
gender_data=pd.DataFrame({'Total Count':count_var,'Percentage of Players':percent_total})
gender_data["Percentage of Players"] = gender_data["Percentage of Players"].map("{:.2%}".format)
gender_data
```
## Purchasing Analysis (Gender)
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
df = pd.read_csv(file_to_upload)
groupby_var= purchase_data.groupby("Gender")
purchase_count = groupby_var["Purchase ID"].count()
avg_purchase_price = groupby_var["Price"].mean()
avg_purchase_total = groupby_var["Price"].sum()
total_count_gender = groupby_var["SN"].nunique()
avg_purchase_per_person = avg_purchase_total/total_count_gender
gender_data = pd.DataFrame({"Purchase Count": purchase_count,
"Average Purchase Price": avg_purchase_price,
"Average Purchase Value":avg_purchase_total,
"Avg Purchase Total per Person": avg_purchase_per_person})
gender_data.index.name = "Gender"
gender_data.style.format({"Average Purchase Value":"${:,.2f}",
"Average Purchase Price":"${:,.2f}",
"Avg Purchase Total per Person":"${:,.2f}"})
```
## Age Demographics
* Establish bins for ages
* Categorize the existing players using the age bins. Hint: use pd.cut()
* Calculate the numbers and percentages by age group
* Create a summary data frame to hold the results
* Optional: round the percentage column to two decimal points
* Display Age Demographics Table
```
import pandas as pd
file_to_upload = "Resources/purchase_data.csv"
df = pd.read_csv(file_to_upload)
bins=[0,9,14,19,24,29,34,39,60]
bins_age=["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
purchase_data["Age Group"]=pd.cut(purchase_data["Age"],bins, labels=bins_age)
purchase_data
age_data= purchase_data.groupby("Age Group")
total_count_age = age_data["SN"].nunique()
#percents
percent_age= (total_count_age/total_players)*100
age_demo=pd.DataFrame({"Total Count":total_count_age,"Percent of Players":percent_age})
age_demo.index.name= None
age_demo.style.format({"Percent of Players":"{:,.2f}"})
```
## Purchasing Analysis (Age)
* Bin the purchase_data data frame by age
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
bins=[0,9,14,19,24,29,34,39,60]
bins_age=["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
purchase_count= age_data["Purchase ID"].count()
avg_price=age_data["Price"].mean()
avg_total=age_data["Price"].sum()
age_data= purchase_data.groupby("Age Group")
avg_purchase= (avg_total/total_count_age)
age_demo= pd.DataFrame({"Purchase Count":purchase_count,"Average Purchase Price":avg_price,"Total Purchase Value":avg_total,"Avg Total Purchase per Person":avg_purchase})
age_demo.index.name= "Age Ranges"
total_p= age_demo.sort_values(["Avg Total Purchase per Person"], ascending=False).head()
age_demo.style.format({"Average Purchase Price":"${:,.2f}",
"Total Purchase Value":"${:,.2f}",
"Avg Total Purchase per Person":"${:,.2f}"})
```
## Top Spenders
* Run basic calculations to obtain the results in the table below
* Create a summary data frame to hold the results
* Sort the total purchase value column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
```
file_to_load = "Resources/purchase_data.csv"
purchase_data = pd.read_csv(file_to_load)
spender_data=purchase_data.groupby("SN")
purchase_spender = spender_data["Purchase ID"].count()
avg_purchase_price = spender_data["Price"].mean()
spender_total = spender_data["Price"].sum()
high_spenders=pd.DataFrame({"Purchase Count": purchase_spender,
"Average Purchase Price": avg_purchase_price,
"Total Purchase Value":spender_total})
spenders_f= high_spenders.sort_values(["Total Purchase Value"], ascending=False).head()
spenders_f.style.format({"Average Purchase Total":"${:,.2f}",
"Average Purchase Price":"${:,.2f}",
"Total Purchase Value":"${:,.2f}"})
```
## Most Popular Items
* Retrieve the Item ID, Item Name, and Item Price columns
* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value
* Create a summary data frame to hold the results
* Sort the purchase count column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
```
import pandas as pd
file_to_load = "Resources/purchase_data.csv"
purchase_data = pd.read_csv(file_to_load)
items = purchase_data[["Item ID", "Item Name", "Price"]]
item_stats = items.groupby(["Item ID","Item Name"])
#count
purchase_count = item_stats["Price"].count()
#total
purchase_value = (item_stats["Price"].sum())
item_price = purchase_value/purchase_count
most_popular_items = pd.DataFrame({"Purchase Count": purchase_count,
"Item Price": item_price,
"Total Purchase Value":purchase_value})
popular_f = most_popular_items.sort_values(["Purchase Count"], ascending=False).head()
#rounding dollar
popular_f.style.format({"Item Price":"${:,.2f}",
"Total Purchase Value":"${:,.2f}"})
```
## Most Profitable Items
* Sort the above table by total purchase value in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the data frame
```
profitable_f = most_popular_items.sort_values(["Total Purchase Value"], ascending=False).head()
profitable_f.style.format({"Item Price":"${:,.2f}",
                           "Total Purchase Value":"${:,.2f}"})
```
|
github_jupyter
|
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
pd.DataFrame({'Total Players':[purchase_data.SN.nunique()]})
pd.options.display.float_format = '${:,.2f}'.format
purchase_data.head()
pd.DataFrame({'Number of Unique Items': [purchase_data['Item ID'].nunique()],\
'Average Price':[purchase_data.Price.mean()],\
'Number of Purchases':[purchase_data.Price.count()],
'Total Revenue':[purchase_data.Price.sum()]})
import pandas as pd
# File to Load (Remember to Change These)
file_to_upload = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
df = pd.read_csv(file_to_upload)
groupby_var= purchase_data.groupby("Gender")
count_var= groupby_var["SN"].nunique()
total_players=int(df["SN"].nunique())
percent_total=(count_var/total_players)
gender_data=pd.DataFrame({'Total Count':count_var,'Percentage of Players':percent_total})
gender_data["Percentage of Players"] = gender_data["Percentage of Players"].map("{:.2%}".format)
gender_data
df = pd.read_csv(file_to_upload)
groupby_var= purchase_data.groupby("Gender")
purchase_count = groupby_var["Purchase ID"].count()
avg_purchase_price = groupby_var["Price"].mean()
avg_purchase_total = groupby_var["Price"].sum()
total_count_gender = groupby_var["SN"].nunique()
avg_purchase_per_person = avg_purchase_total/total_count_gender
gender_data = pd.DataFrame({"Purchase Count": purchase_count,
"Average Purchase Price": avg_purchase_price,
"Average Purchase Value":avg_purchase_total,
"Avg Purchase Total per Person": avg_purchase_per_person})
gender_data.index.name = "Gender"
gender_data.style.format({"Average Purchase Value":"${:,.2f}",
"Average Purchase Price":"${:,.2f}",
"Avg Purchase Total per Person":"${:,.2f}"})
import pandas as pd
file_to_upload = "Resources/purchase_data.csv"
df = pd.read_csv(file_to_upload)
bins=[0,9,14,19,24,29,34,39,60]
bins_age=["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
purchase_data["Age Group"]=pd.cut(purchase_data["Age"],bins, labels=bins_age)
purchase_data
age_data= purchase_data.groupby("Age Group")
total_count_age = age_data["SN"].nunique()
#percents
percent_age= (total_count_age/total_players)*100
age_demo=pd.DataFrame({"Total Count":total_count_age,"Percent of Players":percent_age})
age_demo.index.name= None
age_demo.style.format({"Percent of Players":"{:,.2f}"})
bins=[0,9,14,19,24,29,34,39,60]
bins_age=["<10","10-14","15-19","20-24","25-29","30-34","35-39","40+"]
purchase_count= age_data["Purchase ID"].count()
avg_price=age_data["Price"].mean()
avg_total=age_data["Price"].sum()
age_data= purchase_data.groupby("Age Group")
avg_purchase= (avg_total/total_count_age)
age_demo= pd.DataFrame({"Purchase Count":purchase_count,"Average Purchase Price":avg_price,"Total Purchase Value":avg_total,"Avg Total Purchase per Person":avg_purchase})
age_demo.index.name= "Age Ranges"
total_p= age_demo.sort_values(["Avg Total Purchase per Person"], ascending=False).head()
age_demo.style.format({"Average Purchase Price":"${:,.2f}",
"Total Purchase Value":"${:,.2f}",
"Avg Total Purchase per Person":"${:,.2f}"})
file_to_load = "Resources/purchase_data.csv"
purchase_data = pd.read_csv(file_to_load)
spender_data=purchase_data.groupby("SN")
purchase_spender = spender_data["Purchase ID"].count()
avg_purchase_price = spender_data["Price"].mean()
spender_total = spender_data["Price"].sum()
high_spenders=pd.DataFrame({"Purchase Count": purchase_spender,
"Average Purchase Price": avg_purchase_price,
"Total Purchase Value":spender_total})
spenders_f= high_spenders.sort_values(["Total Purchase Value"], ascending=False).head()
spenders_f.style.format({"Average Purchase Total":"${:,.2f}",
"Average Purchase Price":"${:,.2f}",
"Total Purchase Value":"${:,.2f}"})
import pandas as pd
file_to_load = "Resources/purchase_data.csv"
purchase_data = pd.read_csv(file_to_load)
items = purchase_data[["Item ID", "Item Name", "Price"]]
item_stats = items.groupby(["Item ID","Item Name"])
#count
purchase_count = item_stats["Price"].count()
#total
purchase_value = (item_stats["Price"].sum())
item_price = purchase_value/purchase_count
most_popular_items = pd.DataFrame({"Purchase Count": purchase_count,
"Item Price": item_price,
"Total Purchase Value":purchase_value})
popular_f = most_popular_items.sort_values(["Purchase Count"], ascending=False).head()
#rounding dollar
popular_f.style.format({"Item Price":"${:,.2f}",
"Total Purchase Value":"${:,.2f}"})
#most profitable items: re-sort by total purchase value and format that result
profitable_f = most_popular_items.sort_values(["Total Purchase Value"], ascending=False).head()
profitable_f.style.format({"Item Price":"${:,.2f}",
                "Total Purchase Value":"${:,.2f}"})
```
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Cropping3D
from keras import backend as K
def format_decimal(arr, places=6):
return [round(x * 10**places) / 10**places for x in arr]
```
### Cropping3D
**[convolutional.Cropping3D.0] cropping ((1,1), (1,1), (1,1)) on 3x5x3x3 input, dim_ordering=tf**
```
data_in_shape = (3, 5, 3, 3)
L = Cropping3D(cropping=((1,1), (1,1), (1,1)), dim_ordering='tf')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(input=layer_0, output=layer_1)
# generate random input data (use seed for reproducibility)
np.random.seed(260)
data_in = 2 * np.random.random(data_in_shape) - 1
print('')
print('in shape:', data_in_shape)
print('in:', format_decimal(data_in.ravel().tolist()))
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
```
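For a quick sanity check on the fixture above (a hypothetical assertion, not part of the original generator): with `dim_ordering='tf'` the three cropped axes are the leading spatial dimensions and the trailing axis holds channels, so removing one plane from each side of the 3x5x3x3 input should leave a 1x3x1x3 output.
```
expected_shape = (3 - 2, 5 - 2, 3 - 2, 3)  # one plane cropped from each side of the three spatial axes
assert result[0].shape == expected_shape   # (1, 3, 1, 3)
```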
**[convolutional.Cropping3D.1] cropping ((1,1), (1,1), (1,1)) on 3x5x3x3 input, dim_ordering=th**
```
data_in_shape = (3, 5, 3, 3)
L = Cropping3D(cropping=((1,1), (1,1), (1,1)), dim_ordering='th')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(input=layer_0, output=layer_1)
# generate random input data (use seed for reproducibility)
np.random.seed(260)
data_in = 2 * np.random.random(data_in_shape) - 1
print('')
print('in shape:', data_in_shape)
print('in:', format_decimal(data_in.ravel().tolist()))
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
```
**[convolutional.Cropping3D.2] cropping ((3,2), (2,1), (2,3)) on 7x6x6x6 input, dim_ordering=tf**
```
data_in_shape = (7, 6, 6, 6)
L = Cropping3D(cropping=((3,2), (2,1), (2,3)), dim_ordering='tf')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(input=layer_0, output=layer_1)
# generate random input data (use seed for reproducibility)
np.random.seed(262)
data_in = 2 * np.random.random(data_in_shape) - 1
print('')
print('in shape:', data_in_shape)
print('in:', format_decimal(data_in.ravel().tolist()))
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
```
**[convolutional.Cropping3D.3] cropping ((3,2), (2,1), (2,3)) on 7x6x6x6 input, dim_ordering=th**
```
data_in_shape = (7, 6, 6, 6)
L = Cropping3D(cropping=((3,2), (2,1), (2,3)), dim_ordering='th')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(input=layer_0, output=layer_1)
# generate random input data (use seed for reproducibility)
np.random.seed(262)
data_in = 2 * np.random.random(data_in_shape) - 1
print('')
print('in shape:', data_in_shape)
print('in:', format_decimal(data_in.ravel().tolist()))
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
```
```
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import numpy as np
from LSsurf import smooth_xytb_fit
import pointCollection as pc
import sparseqr
import glob
import h5py
import os
import LSsurf
import scipy.stats as sps
%matplotlib widget
def safe_interp(x, x0_in, y0_in):
y=np.NaN
if x0_in[-1] < x0_in[0]:
x0=x0_in[::-1]
y0=y0_in[::-1]
else:
x0=x0_in
y0=y0_in
try:
i0=np.argwhere(x0 < x)[-1][0]
i1=np.argwhere(x0 >=x)[0][0]
#print([i0, i1])
#print( x0[[i0, i1]])
#print( y0[[i0, i1]])
y=np.interp(x, x0[[i0, i1]], y0[[i0, i1]])
except Exception:
pass
return y
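# Worked example (illustrative values): safe_interp(1.0, np.array([0.5, 1.5, 3.0]), np.array([10., 20., 30.]))
# brackets x=1.0 between (0.5, 10.0) and (1.5, 20.0) and returns 15.0; if x falls outside the range of
# x0_in, the IndexError raised by the empty argwhere is swallowed and NaN is returned.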
ATL11_index='/att/nobackup/project/icesat-2/ATL14_processing//ATL11_004/north/index/GeoIndex.h5'
mask_file='/home/besmith4/git_repos/surfaceChange/masks/Arctic/U_Texas_ice_mask_2019.tif'
mask=pc.grid.data().from_geotif(mask_file)
mask.show()
xy0=np.round(np.array([np.mean(ii) for ii in [plt.gca().get_xlim(),plt.gca().get_ylim() ]])/1.e4)*1.e4
pad=np.array([-1.e4, 1.e4])
D11=pc.geoIndex().from_file(ATL11_index).query_xy_box(xy0[0]+pad, xy0[1]+pad)
data_top='/home/besmith4/shared/ATL11_processing/Arctic_003_cycle_03_09/003'
#data_top='/Data/ATL11'
all_files = glob.glob(data_top+'/ATL11*01.h5')
sorted_files = sorted(all_files, key = os.path.getsize, reverse=True)
len(all_files)
file=sorted_files[0]
print(file)
#D11=pc.ATL11.data().from_h5(file)
sorted_files=['/home/besmith4/shared/ATL11_processing/Arctic_003_cycle_03_09/003/ATL11_091703_0309_003_01.h5']
for file in sorted_files[0:1]:
D11=pc.ATL11.data().from_h5(file)
plt.figure(1, figsize=[6, 4]); plt.clf()
plt.subplot(131)
plt.plot(D11.x_atc, D11.h_corr,'.')
plt.subplot(132)
ii = (D11.fit_quality[:,0]==0).ravel()
plt.plot(D11.x_atc[ii,:], D11.h_corr[ii,:],'.')
plt.title(os.path.basename(file))
plt.subplot(133)
plt.plot(D11.x_atc, np.sum(np.isfinite(D11.h_corr), axis=1))
D11.index(D11.fit_quality[:,0] ==0)
# define the domain's width in x, y, and time
W={'x':4.e4,'y':400,'t':.2}
# define the grid center:
XR=np.nanmean(D11.x_atc)+np.array([-1, 1])*W['x']/2
ctr={'x':XR[0]+W['x']/2., 'y':0., 't':0.}
# define the grid spacing
spacing={'z0':100, 'dz':100, 'dt':.1}
D=pc.data().from_dict({'x':D11.x_atc[:,0], 'y':np.zeros_like(D11.x_atc[:,0]),'z':D11.h_corr[:,0],\
'time':np.zeros_like(D11.x_atc[:,0]), 'sigma':D11.h_corr_sigma[:,0]})
# To ensure a time-constant simulation, replicate the data at times -0.5 and 0.5:
#data=pc.data().from_list([D, D.copy().assign({'time':np.zeros_like(D.x)}), D.copy().assign({'time':np.zeros_like(D.x)+0.5})])
data=D
data.index(np.isfinite(data.z) & np.isfinite(data.sigma) & (data.sigma>0))
! gdalsrsinfo -o proj4 EPSG:3413
2+2
# define the expected statistics of the surface
from LSsurf import smooth_xytb_fit
E_d3zdx2dt=0.0001
E_d2z0dx2=0.006
E_d2zdt2=5000
data_gap_scale=2500
data1=data[(data.x > XR[0]) & (data.x < XR[0]+W['x'])]
srs_proj4='+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs '
mask_file='/home/besmith4/nobackup/masks/Arctic/GimpIceMask_100m_edited.tif'
scale_vals=np.array([ 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 10, 100])[::-1]
sigma_hat_vals=np.zeros_like(scale_vals)
sigma_hat_s_vals=np.zeros_like(scale_vals)
N_vals=np.zeros_like(scale_vals)
S=[]
d_ed=[]
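# Sweep the z0 curvature constraint (d2z0_dx2) over several orders of magnitude; for each value record
# the robust spread (RDE) of the scaled residuals and the number of points that survive the
# three-sigma edit.  The preferred constraint is read off below where the scaled residual crosses 1.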
for ii, scale_val in enumerate(scale_vals):
print(scale_val)
# run the fit
E_RMS={'d2z0_dx2': E_d2z0dx2*scale_val,
'dz0_dx': E_d2z0dx2*data_gap_scale*scale_val,
'd3z_dx2dt':E_d3zdx2dt ,
'd2z_dxdt': E_d3zdx2dt*data_gap_scale,
'd2z_dt2': E_d2zdt2}
srs_proj4=None
S.append(smooth_xytb_fit(data=data1, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS,
reference_epoch=1, N_subset=None, compute_E=False,
max_iterations=5,
VERBOSE=False, dzdt_lags=[1]))
d_ed.append(S[-1]['data'])
d_ed[-1].index(d_ed[-1].three_sigma_edit==1)
sigma_hat_vals[ii] = LSsurf.RDE(d_ed[-1].z-d_ed[-1].z_est)
N_vals[ii]=d_ed[-1].size
sigma_hat_s_vals[ii] = LSsurf.RDE((d_ed[-1].z-d_ed[-1].z_est)/d_ed[-1].sigma)
# plot the results
fig=plt.figure( figsize=[6,6])
fig.clf()
x0 = data1.x[0]
ax=[]
ax.append(fig.add_subplot(221))
ax[-1].plot( (data1.x-x0)/1000, data1.z,'ko', label='data', zorder=0)
ax[-1].plot((S[-1]['m']['z0'].x-x0)/1000, S[-1]['m']['z0'].z0[0,:],'r',linewidth=1.5, label='z0,tight constraint', zorder=2)
ax[-1].plot((S[0]['m']['z0'].x-x0)/1000, S[0]['m']['z0'].z0[0,:],'b', linewidth=1.5, label='z0, loose constraint', zorder=1)
ax[-1].set_ylabel('height, m')
ax[-1].set_xlabel('x_atc, km')
ax[-1].legend();
ax.append(fig.add_subplot(222, sharex=ax[0]))
ax[-1].plot((d_ed[-1].x-x0)/1000, (d_ed[-1].z-d_ed[-1].z_est),'r.', label='tight constraint')
ax[-1].plot((d_ed[0].x-x0)/1000, (d_ed[0].z-d_ed[0].z_est),'b.', label='loose constraint')
ax[-1].set_ylabel('residual, m')
ax[-1].set_xlabel('x_atc, km')
ax[-1].legend()
ax.append(fig.add_subplot(223, sharex=ax[0]))
ax[-1].plot((d_ed[-1].x-x0)/1000, (d_ed[-1].z-d_ed[-1].z_est)/d_ed[-1].sigma,'r.', label='tight constraint')
ax[-1].plot((d_ed[0].x-x0)/1000, (d_ed[0].z-d_ed[0].z_est)/d_ed[0].sigma,'b.', label='loose constraint')
ax[-1].set_ylabel('scaled residual')
ax[-1].set_xlabel('x_atc, km')
ax.append( fig.add_subplot(224))
ax[-1].loglog(scale_vals*E_d2z0dx2, sigma_hat_s_vals, label='robust scaled residual')
ax[-1].loglog(scale_vals*E_d2z0dx2, N_vals/S[0]['data'].size, label='fraction pts used')
this_x0=safe_interp( 1.,sigma_hat_s_vals, scale_vals*E_d2z0dx2)
yl=ax[-1].get_ylim()
ax[-1].plot(this_x0*np.ones(2), yl, 'k--', linewidth=2, label='$\sigma_{xx}$'+f'={this_x0:2.2e}')
ax[-1].set_ylim(yl)
ax[-1].legend()
ax[-1].set_xlabel('$\sigma_{xx}$')
fig.tight_layout()
fig.savefig('ATL11_z0_param_selection.tif', format='tif')
! ls /home/besmith4/nobackup/MOG/MOG_500.tif
MOG=pc.grid.data().from_geotif('/Data/MOG/mog1km_2005_hp1_v1.1.tif');
MOG.show()
D1=D11[(D11.x_atc[:,0] > XR[0]) & (D11.x_atc[:,0] < XR[0]+W['x'])]
D1.get_xy(EPSG=3413)
plt.plot(D1.x, D1.y,'r.')
np.max((scale_vals*E_d2z0dx2)[sigma_hat_s_vals>1])
def read_ATL11_file(file, mask_file):
D11=pc.ATL11.data().from_h5(file)
with h5py.File(file,'r') as h5f:
qs=np.array(h5f['/pt2/ref_surf/quality_summary'])
D11.assign({'ref_surf_quality':qs})
D11.get_xy(EPSG=3413)
XR=np.array([np.nanmin(D11.x), np.nanmax(D11.x)])
YR=np.array([np.nanmin(D11.y), np.nanmax(D11.y)])
mask=pc.grid.data().from_geotif(mask_file, bounds=[XR, YR]).interp(D11.x[:,0], D11.y[:,0]) > 0.5
D11.index(mask & (D11.ref_surf_quality <1))
return D11
def find_best_wxx0(D11):
scale_vals=np.array([ 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100, 300])
E_d3zdx2dt=0.0001
E_d2z0dx2=0.006
E_d2zdt2=5000
data_gap_scale=2500
# define the domain's width in x, y, and time
W={'x':4.e4,'y':200,'t':.2}
# define the grid center:
XR=np.nanmean(D11.x_atc)+np.array([-1, 1])*W['x']/2
ctr={'x':XR[0]+W['x']/2., 'y':0., 't':0.}
# define the grid spacing
spacing={'z0':100, 'dz':100, 'dt':.1}
dN=np.ceil(W['x']/30).astype(int)
L_interp={}
for pt0 in np.arange(D11.ref_pt[0,0]+dN/2, D11.ref_pt[-1,0], dN):
ii=np.flatnonzero(np.abs(D11.ref_pt[:,0]-pt0)<3*dN/2)
N_good=np.sum(np.isfinite(D11.h_corr[ii,:]), axis=0)
if np.max(N_good)<0.9*dN:
continue
bc=np.argmax(N_good)
nb=N_good[bc]
xy_ctr=[np.nanmean(D11.x[ii, bc]), np.nanmean(D11.y[ii, bc]), np.nanmean(D11.h_corr[ii, bc])]
D=pc.data().from_dict({'x':D11.x_atc[ii,bc], 'y':np.zeros_like(ii, dtype=float),'z':D11.h_corr[ii,bc],\
'time':np.zeros_like(ii, dtype=float), 'sigma':D11.h_corr_sigma[ii,bc]})
D.index(np.isfinite(D.z) & np.isfinite(D.sigma) & (D.sigma>0))
S=[]
ctr={'x':np.nanmean(D.x), 'y':0., 't':0.}
L_curve={key:[] for key in ['wzz0', 'sigma_hat_s', 'N']}
for scale_val in scale_vals:
# run the fit
E_RMS={'d2z0_dx2': E_d2z0dx2*scale_val,
'dz0_dx': E_d2z0dx2*data_gap_scale*scale_val,
'd3z_dx2dt':E_d3zdx2dt ,
'd2z_dxdt': E_d3zdx2dt*data_gap_scale,
'd2z_dt2': E_d2zdt2}
S.append(smooth_xytb_fit(data=D, ctr=ctr, W=W, spacing=spacing, E_RMS=E_RMS,
reference_epoch=1, N_subset=None, compute_E=False,
max_iterations=5,
VERBOSE=False, dzdt_lags=[1]))
d_ed = S[-1]['data']
d_ed.index(d_ed.three_sigma_edit==1)
L_curve['sigma_hat_s'].append( LSsurf.RDE((d_ed.z-d_ed.z_est)/d_ed.sigma))
L_curve['wzz0'].append(E_RMS['d2z0_dx2'])
L_curve['N'].append(d_ed.size)
for key in L_curve.keys():
L_curve[key] = np.array(L_curve[key])
L_interp[pt0] = {"w_for_r_of_1":safe_interp(1, L_curve['sigma_hat_s'], L_curve['wzz0']),
'w_for_r_10pct_above_min':safe_interp(1.1*L_curve['sigma_hat_s'].min(), L_curve['sigma_hat_s'], L_curve['wzz0']),
'x': xy_ctr[0],
'y': xy_ctr[1],
'z': xy_ctr[2]}
return L_interp
! ls '/home/besmith4/nobackup/masks'
D11=read_ATL11_file(sorted_files[10],'/home/besmith4/nobackup/masks/Arctic/GimpIceMask_100m_edited.tif' )
L_interp=find_best_wxx0(D11)
fig=plt.figure(6); plt.clf()
h0=fig.add_subplot(211)
plt.plot(D11.ref_pt[:,0], D11.h_corr,'.')
fig.add_subplot(212, sharex=h0)
plt.plot(np.array(list(L_interp.keys())), np.log10(np.array([L_interp[key]['w_for_r_10pct_above_min'] for key in L_interp.keys()])),'ks')
plt.plot(np.array(list(L_interp.keys())), np.log10(np.array([L_interp[key]['w_for_r_of_1'] for key in L_interp.keys()])),'r*')
```
## Make a queue of files to analyze
```
mask_file='/home/besmith4/git_repos/surfaceChange/masks/Arctic/U_Texas_ice_mask_2019.tif'
ATL11_dir='/att/nobackup/project/icesat-2/ATL14_processing/ATL11_004/north/'
ATL11_files=glob.glob(ATL11_dir+'/ATL11*01.h5')
themask=pc.grid.data().from_geotif(mask_file)[::5, ::5]
readme=np.zeros(len(ATL11_files), dtype=bool)
in_list=[]
out_list=[]
fail_list=[]
for file_count, file in enumerate(ATL11_files):
try:
with h5py.File(file,'r') as h5f:
Dll=pc.data().from_dict({'latitude':np.array(h5f['/pt2/latitude'])[::10], 'longitude':np.array(h5f['/pt2/longitude'])[::10]})
Dll.get_xy(EPSG=3413)
except Exception as e:
fail_list += [file]
continue
N_good=np.nansum(themask.interp(Dll.x, Dll.y)>0.5)
if N_good > 50:
in_list += [file]
else:
out_list += [file]
import re
re_11=re.compile(r'\d\d_01.h5')
! mkdir /home/besmith4/nobackup/ATL11_wxx_analysis_10km/
EPSG=3413
N_pairs=3
with open('/home/besmith4/temp/wxx_queue.txt','w') as fh:
for file in in_list:
if re_11.search(file) is None:
continue
out_file = '/home/besmith4/nobackup/ATL11_wxx_analysis_10km/'+os.path.basename(file)
fh.write(f'source activate IS2; python3 ~/git_repos/surfaceChange/scripts//best_wxx0_for_ATL11.py {file} 10000 {mask_file} {out_file} {EPSG} {N_pairs}\n')
mask_file='/home/besmith4/git_repos/surfaceChange/masks/Arctic/U_Texas_ice_mask_2019.tif'
```
## Read in calculated best $W_{xx}$ values
```
out_files=glob.glob('/home/besmith4/nobackup/ATL11_wxx_analysis_10km/*.h5')
D_list=[]
for file in out_files:
Di=pc.data().from_h5(file, field_dict={None:['w_for_r_10pct_above_min','F_data_r_of_1', 'w_for_r_of_1', 'sigma_hat_min', 'x','y','z' ]})
if np.prod(Di.size) > 0:
D_list.append(Di)
D_all=pc.data().from_list(D_list)
D_all.index(np.isfinite(D_all.w_for_r_of_1))
plt.figure(); plt.hist(np.log10(D_all.sigma_hat_min), 100);
D_all.index(D_all.sigma_hat_min < 0.95)
```
Contents of one per-file output (HDF5 dataset listing):

    w_for_r_10pct_above_min  Dataset {13/Inf}
    w_for_r_of_1             Dataset {13/Inf}
    x                        Dataset {13/Inf}
    y                        Dataset {13/Inf}
    z                        Dataset {13/Inf}
```
plt.figure()
zw=[[0, 1e-2]]
for h_bin in np.arange(0, 4500, 200):
ii = (D_all.z > h_bin) & (D_all.z < h_bin+200)
zw += [(h_bin+250,sps.scoreatpercentile(D_all.w_for_r_of_1[ii], 50))]
zw=np.c_[zw]
w_of_h_curve=pc.data().from_dict({'w':zw[:,1], 'h':zw[:,0]})
plt.semilogy(D_all.z, D_all.w_for_r_of_1,'.')
plt.plot(w_of_h_curve.h, w_of_h_curve.w,'r')
plt.figure(figsize=[8,4]);
plt.clf()
hax=plt.gcf().subplots(1,2)
ii=np.argsort(D_all.w_for_r_of_1)
plt.sca(hax[0])
plt.scatter(D_all.x[ii], D_all.y[ii], 4, c=np.log10(D_all.w_for_r_of_1[ii]), vmin=-4, vmax=-1.5);
plt.axis('equal')
plt.colorbar( shrink=0.5, extend='both', orientation='horizontal', label='$log_{10} (\sigma_{xx0})$');
hax[1].hist(np.log10(D_all.w_for_r_of_1), np.arange(-4.5, -1.5, 0.1))
hax[1].set_xlabel('$log_{10} (\sigma_{xx0})$')
hax[1].yaxis.set_ticks_position('right')
hax[1].plot(np.log10(0.006)*np.ones(2), hax[1].get_ylim(),'k--')
plt.tight_layout()
def bin_pct(D, els):
return sps.scoreatpercentile(D.w_for_r_of_1[els], 50)
def bin_med(D, els):
return np.exp(np.nanmedian(np.log(D.w_for_r_of_1[els])))
Di=[]
for dx in np.array([-0.5, 0])*1.e4:
for dy in np.array([-0.5, 0])*1.e4:
Di += [pc.apply_bin_fn(D_all, 1.e4, bin_med, xy0=[dx, dy])]
Di=pc.data().from_list(Di)
#plt.figure()
#plt.scatter(Di.x, Di.y, c=np.log10(Di.z), vmin=-4, vmax=1.5)
#plt.gca().set_aspect(1)
Dg=pc.points_to_grid(Di, 5000)
Dn=pc.points_to_grid(Di, 5000, field='count')
fig, hax=plt.subplots(1,2, sharex=True, sharey=True)
ii=np.argsort(D_all.w_for_r_of_1)
#hax[0].scatter(D_all.x[ii], D_all.y[ii], 4, c=np.log10(D_all.w_for_r_of_1[ii]), vmin=-4, vmax=-1.5);
hax[0].imshow(np.log10(Dg.z), origin='lower', extent=Dg.extent, interpolation='nearest', clim=[-4, -1.5])
hax[1].imshow(Dn.z, origin='lower', extent=Dn.extent, interpolation='nearest')
for ax in hax:
ax.set_facecolor('gray')
ax.set_aspect(1)
DEM=pc.grid.data().from_geotif('/home/besmith4/nobackup/DEMs/Arctic/ArcticDEM/arcticdem_mosaic_250m_v3.0.tif', bounds=Dg.bounds())
Dg.assign({'DEM':DEM.interp(Dg.x, Dg.y, gridded=True)})
els=~np.isfinite(Dg.z) & np.isfinite(Dg.DEM)
z1=Dg.z.copy()
w_of_h_curve.h[0]=-100
z1[els]=np.interp(Dg.DEM[els], w_of_h_curve.h, w_of_h_curve.w)
plt.figure(); plt.imshow(np.log10(z1), clim=[-4, -2], origin='lower', extent=Dg.extent, interpolation='nearest')
Dg.z=z1
Dg.to_geotif('/home/besmith4/nobackup/ATL11_wxx_analysis_10km/E_d2z0_dx2.tif', srs_epsg=3413)
ux0=[]
nx0=[]
D_mb0=[]
for dxi in [-1.e4, 0, 1.e4]:
for dyi in [-1.e4, 0, 1.e4]:
x0=np.round((D_all.x+1j*D_all.y-(dxi+1j*dyi))/2.e4)*2.e4+(dxi+1j*dyi)
ux0i=np.unique(x0)
nx0i=np.zeros_like(ux0i, dtype=float)
D_mb0i=np.zeros_like(ux0i, dtype=float)
for ii, xx0 in enumerate(ux0i):
these=x0==xx0
nx0i[ii]=np.sum(these)
D_mb0i[ii]=np.nanmedian(D_all.w_for_r_of_1[these])
ux0 += [ux0i]
nx0 += [nx0i]
D_mb0 += [D_mb0i]
ux0=np.concatenate(ux0)
nx0=np.concatenate(nx0)
D_mb0=np.concatenate(D_mb0)
ux0=ux0[nx0>3]
D_mb0=D_mb0[nx0>3]
nx0=nx0[nx0>3]
_, temp=np.unique(ux0, return_index=True)
ux0=ux0[temp]
D_mb0=D_mb0[temp]
nx0=nx0[temp]
from scipy.stats import scoreatpercentile
#MOG=pc.grid.data().from_geotif('/home/besmith4/nobackup/MOG/MOG_500.tif')
#MOG.z /= 255
#plt.figure()
#MOG.show(cmap='gray')
plt.figure( figsize=[5,3.5]);
plt.clf()
hax=plt.gcf().subplots(1,2)
ii=np.argsort(D_mb0)
plt.sca(hax[0])
MOG.show()
plt.scatter(np.real(ux0[ii]), np.imag(ux0[ii]), 6, c=np.log10(D_mb0[ii]), vmin=-4, vmax=-2.5)
plt.colorbar( shrink=0.5, extend='both', label='$log_{10} (\sigma_{xx})$');
plt.gca().set_xticks([])
plt.gca().set_yticks([])
#orientation='horizontal',
hax[1].hist(np.log10(D_mb0), np.arange(-4.1, -2.5, 0.025))
hax[1].set_xlabel('$log_{10} (\sigma_{xx})$')
#hax[1].yaxis.set_ticks_position('right')
#hax[1].yaxis.tick_right()
this_p98=scoreatpercentile(D_mb0, 98)
hax[1].plot(np.log10(this_p98)*np.ones(2), hax[1].get_ylim(),'k--', label="$\sigma_{xx}$="+f"{this_p98:2.1e}")
hax[1].set_ylabel('count')
hax[1].legend()
plt.tight_layout()
plt.axis('equal');
plt.tight_layout()
plt.gcf().savefig('sigma_xx_map.png', format='png')
plt.gcf()
pwd
from scipy.stats import scoreatpercentile
this_p98/np.sqrt(8)
(1.7e-3/np.sqrt(8))/0.0006
## manipulation of the mask:
E_d2z0 = pc.grid.data().from_geotif('/home/besmith4/nobackup/ATL11_wxx_analysis_10km/E_d2z0_dx2.tif')
plt.figure();
plt.imshow(np.log10(E_d2z0.z), origin='lower', clim=[-4, -2])
plt.gca().set_aspect(1)
mask_file='/home/besmith4/git_repos/surfaceChange/masks/Arctic/U_Texas_ice_mask_2019.tif'
mask_i = pc.grid.data().from_geotif(mask_file).interp(E_d2z0.x, E_d2z0.y, gridded=True)
E_d2z0.z[mask_i==0]=np.NaN
hw=4.e4
bds=E_d2z0.bounds()
print(bds)
for count, bd in enumerate(bds):
bd=[np.floor(bd[0]/hw)*hw, np.ceil(bd[1]/hw)*hw]
bds[count]=bd
print(bds)
xg, yg = [ii.ravel() for ii in np.meshgrid(np.arange(bds[0][0], bds[0][1]+hw, hw), np.arange(bds[1][0], bds[1][1]+hw, hw)) ]
#ctr_ii= (np.mod(xg, 2*hw)==0) & (np.mod(yg, 2*hw)==0)
#ctrs=[xg[ctr_ii], yg[ctr_ii]]
ctrs=[xg, yg]
plt.figure()
plt.imshow(np.log10(E_d2z0.z), extent=E_d2z0.extent, origin='lower', clim=[-4, -2])
plt.gca().set_aspect(1)
plt.plot(ctrs[0], ctrs[1],'r+')
import scipy.stats as sps
pad=np.array([-1, 1])*hw
xyW=[]
for ctr_i in zip(ctrs[0], ctrs[1]):
temp=E_d2z0.copy().crop(ctr_i[0]+pad, ctr_i[1]+pad)
if np.mean(np.isfinite(temp.z))<0.02:
continue
zz=temp.z.ravel()
xyW+= [list(ctr_i) + [sps.scoreatpercentile(zz[np.isfinite(zz)], 95)]]
xyW=np.c_[xyW]
plt.figure(); plt.scatter(xyW[:,0], xyW[:,1], c=np.log10(xyW[:,2]), vmin=-4, vmax=-2)
plt.figure()
pc.points_to_grid(pc.data().from_dict({'x':xyW[:,0], 'y':xyW[:,1], 'z':xyW[:,2]}), 4.e4).show(clim=[0.0001, 0.015])
pc.points_to_grid(pc.data().from_dict({'x':xyW[:,0], 'y':xyW[:,1], 'z':xyW[:,2]}), 4.e4).to_geotif('/home/besmith4/git_repos/surfaceChange/masks/Arctic/GL_Ed2z0dx2.tif', srs_epsg=3413)
M=pc.points_to_grid(pc.data().from_dict({'x':xyW[:,0], 'y':xyW[:,1], 'z':xyW[:,2]}), 4.e4)
M
```
## Figure for ATBD
```
M=pc.grid.data().from_geotif('/home/besmith4/git_repos/surfaceChange/masks/Arctic/GL_Ed2z0dx2.tif')
E_d2z0 = pc.grid.data().from_geotif('/home/besmith4/nobackup/ATL11_wxx_analysis_10km_v0/E_d2z0_dx2.tif')
hfig, hax = plt.subplots(1,2, figsize=[6,6])
hax[0].imshow(np.log10(E_d2z0.z), extent=E_d2z0.extent, interpolation='nearest', origin='lower', clim=[-6, -1.5])
h_mappable=hax[1].imshow(np.log10(np.maximum(1.e-4, np.minimum(1.e-2,M.z))), extent=M.extent, interpolation='nearest', origin='lower', clim=[-6, -1.5])
for ax in hax:
ax.set_xticks([])
ax.set_yticks([])
ax.set_facecolor('gray')
hb=plt.colorbar(h_mappable, ax=hax, orientation='horizontal', shrink=0.5, label='$\sigma_{xx}$')
hb.set_ticks(np.log10([1e-5, 1e-4, 1e-3, 1e-2, 1e-1 ]))
hb.set_ticklabels(['$10^{%d}$' % exp for exp in np.arange(-5, 0)])
hax[0].set_title('all $\sigma_{xx}$')
hax[1].set_title('80-km $\sigma_{xx}$')
[f'$10^{exp}$' for exp in np.arange(-5, 0)]
```
#### Data Fetch
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
#extracting the lines that contain "Enqueue" to build a simplified version of the log
with open("2-fft-malicious-n-0-3-m-14.log") as log_in, open('2-fft-malicious-n-0-3-m-14.txt','w') as log_out:
    log_out.writelines([line for line in log_in if "Enqueue" in line])
print ("done")
#extracting content from lines
csv_out = open('2-fft-malicious-n-0-3-m-14-csv.txt','w')
with open ('2-fft-malicious-n-0-3-m-14.txt', 'rt') as fft:
csv_out.write("time,router,outport,inport,packet_address,packet_type,flit_id,flit_type,vnet,vc,src_ni,src_router,dst_ni,dst_router,enq_time\n")
for line in fft:
line_split = line.split()
time = line_split[line_split.index("time:") + 1]
router = line_split[line_split.index("SwitchAllocator") + 3]
outport = line_split[line_split.index("outport") + 1]
inport = line_split[line_split.index("inport") + 1]
packet_address = line_split[line_split.index("addr") + 2][1:-1]
packet_type = line_split[line_split.index("addr") + 7]
flit_id = line_split[line_split.index("[flit::") + 1][3:]
flit_type = line_split[line_split.index("Id="+str(flit_id)) + 1][5:]
vnet = line_split[line_split.index("Type="+str(flit_type)) + 1][5:]
vc = line_split[line_split.index("Vnet="+str(vnet)) + 1][3:]
src_ni = line_split[line_split.index("VC="+str(vc)) + 2][3:]
src_router = line_split[line_split.index("NI="+str(src_ni)) + 2][7:]
dst_ni = line_split[line_split.index("Router="+str(src_router)) + 2][3:]
dst_router = line_split[line_split.index("NI="+str(dst_ni)) + 2][7:]
enq_time = str(line_split[line_split.index("Enqueue") + 1][5:])
line_csv = time+","+router+","+outport+","+inport+","+packet_address+","+packet_type+","+flit_id+","+flit_type+","+vnet+","+vc+","+src_ni+","+src_router+","+dst_ni+","+dst_router+","+enq_time+"\n"
csv_out.write(line_csv)
csv_out.close()
print ("done")
#convert txt to csv
df = pd.read_csv("2-fft-malicious-n-0-3-m-14-csv.txt",delimiter=',')
df.to_csv('2-fft-malicious-n-0-3-m-14.csv',index=False)
#dataset
df = pd.read_csv('2-fft-malicious-n-0-3-m-14.csv')
df.shape
df.describe()
sns.distplot(df['router'], kde = False, bins=30, color='blue')
sns.distplot(df['src_router'], kde = False, bins=30, color='blue')
sns.distplot(df['dst_router'], kde = False, bins=30, color='red')
sns.distplot(df['inport'], kde = False, bins=30, color='green')
sns.distplot(df['outport'], kde = False, bins=30, color='green')
sns.distplot(df['packet_type'], kde = False, bins=30, color='red')
direction = {'Local': 0,'North': 1, 'East': 2, 'South':3,'West':4}
df = df.replace({'inport': direction, 'outport': direction})
data = {'GETS': 1,'GETX': 2,'GUX': 3,'DATA': 4, 'PUTX': 5,'PUTS': 6,'WB_ACK':7}
df = df.replace({'packet_type': data})
df['flit_id'] = df['flit_id']+1
df['flit_type'] = df['flit_type']+1
df['vnet'] = df['vnet']+1
df['vc'] = df['vc']+1
hoparr = {"0to0":0,"0to1":1,"0to2":2,"0to3":3,"0to4":1,"0to5":2,"0to6":3,"0to7":4,"0to8":2,"0to9":3,"0to10":4,"0to11":5,"0to12":3,"0to13":4,"0to14":5,"0to15":6,
"1to1":0,"1to2":1,"1to3":2,"1to4":2,"1to5":1,"1to6":2,"1to7":3,"1to8":3,"1to9":2,"1to10":3,"1to11":4,"1to12":5,"1to13":3,"1to14":4,"1to15":5,
"2to2":0,"2to3":1,"2to4":3,"2to5":2,"2to6":1,"2to7":2,"2to8":4,"2to9":3,"2to10":2,"2to11":3,"2to12":5,"2to13":4,"2to14":3,"2to15":4,
"3to3":0,"3to4":4,"3to5":3,"3to6":2,"3to7":1,"3to8":5,"3to9":4,"3to10":3,"3to11":2,"3to12":6,"3to13":5,"3to14":4,"3to15":3,
"4to4":0,"4to5":1,"4to6":2,"4to7":3,"4to8":1,"4to9":2,"4to10":3,"4to11":4,"4to12":2,"4to13":3,"4to14":4,"4to15":5,
"5to5":0,"5to6":1,"5to7":2,"5to8":2,"5to9":1,"5to10":2,"5to11":3,"5to12":3,"5to13":2,"5to14":3,"5to15":4,
"6to6":0,"6to7":1,"6to8":3,"6to9":2,"6to10":1,"6to11":2,"6to12":4,"6to13":3,"6to14":2,"6to15":3,
"7to7":0,"7to8":4,"7to9":3,"7to10":2,"7to11":1,"7to12":5,"7to13":4,"7to14":3,"7to15":2,
"8to8":0,"8to9":1,"8to10":2,"8to11":3,"8to12":1,"8to13":2,"8to14":3,"8to15":4,
"9to9":0,"9to10":1,"9to11":2,"9to12":2,"9to13":1,"9to14":2,"9to15":4,
"10to10":0,"10to11":1,"10to12":3,"10to13":2,"10to14":1,"10to15":2,
"11to11":0,"11to12":4,"11to13":3,"11to14":2,"11to15":1,
"12to12":0,"12to13":1,"12to14":2,"12to15":3,
"13to13":0,"13to14":1,"13to15":2,
"14to14":0,"14to15":1,
"15to15":0}
packarr = {}
packtime = {}
packchunk = []
hopcurrentarr = []
hoptotarr = []
hoppercentarr =[]
waitingarr = []
interval = 500
count = 0
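# For each flit record compute: its queueing delay (timestamp minus enqueue time), the total and
# already-travelled hop counts looked up from the table above, the fraction of the route covered,
# and a traversal id that groups repeated appearances of the same packet address within the
# `interval` window defined above.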
for index, row in df.iterrows():
current_time = row["time"]
enqueue_time = row["enq_time"]
waiting_time = current_time - enqueue_time
waitingarr.append(waiting_time)
current_router = row["router"]
src_router = row["src_router"]
dst_router = row["dst_router"]
src_router_temp = src_router
if src_router_temp>dst_router:
temph = src_router_temp
src_router_temp = dst_router
dst_router = temph
hop_count_string = str(src_router_temp)+"to"+str(dst_router)
src_router_temp = src_router
hop_count = hoparr.get(hop_count_string)
if src_router_temp>current_router:
tempc = src_router_temp
src_router_temp = current_router
current_router = tempc
current_hop_string = str(src_router_temp)+"to"+str(current_router)
current_hop = hoparr.get(current_hop_string)
if(current_hop == 0 and hop_count ==0):
hop_percent = 0
else:
hop_percent = current_hop/hop_count
hoptotarr.append(hop_count)
hopcurrentarr.append(current_hop)
hoppercentarr.append(hop_percent)
if row["packet_address"] not in packarr:
packarr[row["packet_address"]] = count
packtime[row["packet_address"]] = row["time"]
packchunk.append(packarr.get(row["packet_address"]))
count+=1
else:
current_time = row["time"]
position = packarr.get(row["packet_address"])
pkt_time = packtime.get(row["packet_address"])
current_max = max(packarr.values())
if (current_time-pkt_time)<interval:
packchunk.append(packarr.get(row["packet_address"]))
else:
del packarr[row["packet_address"]]
del packtime[row["packet_address"]]
packarr[row["packet_address"]] = current_max+1
packtime[row["packet_address"]] = row["time"]
packchunk.append(packarr.get(row["packet_address"]))
if (current_max)==count:
count+=2
elif (current_max+1)==count:
count+=1
df['packet_address'].nunique()
print(len(packarr))
print(len(packchunk))
df = df.assign(traversal_id=packchunk)
df = df.assign(hop_count=hoptotarr)
df = df.assign(current_hop=hopcurrentarr)
df = df.assign(hop_percentage=hoppercentarr)
df = df.assign(enqueue_time=waitingarr)
df.rename(columns={'packet_type': 'cache_coherence_type', 'time': 'timestamp'}, inplace=True)
df = df.drop(columns=['packet_address','enq_time'])
df.isnull().sum()
df.dtypes
df.to_csv('2-fft-malicious-n-0-3-m-14.csv',index=False)
```
#### Router Fetch
```
df = pd.read_csv('2-fft-malicious-n-0-3-m-14.csv')
df.shape
df = df.loc[df['router'] == 15]
df = df.drop(columns=['router'])
df.to_csv('2-fft-malicious-n-0-3-m-14-r15.csv',index=False)
df = pd.read_csv('2-fft-malicious-n-0-3-m-14-r15.csv')
def timecount(df):
timearr = []
interval = 99
count = 0
for index, row in df.iterrows():
if row["timestamp"]<=interval:
count+=1
else:
timearr.append([interval+1,count])
count=1
interval+=100
timearr.append([interval+1,count])
return timearr
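# timearr ends up as [[100, n_0], [200, n_1], ...]: the number of rows whose timestamp falls in each
# consecutive 100-tick window (assuming the timestamps never skip an entire window).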
def maxcount(timearr,df):
countarr = []
increarr = []
maxarr = []
for i in range(len(timearr)):
for cnt in range(timearr[i][1],0,-1):
countarr.append(cnt)
maxarr.append(timearr[i][1])
increment = timearr[i][1] - cnt + 1
increarr.append(increment)
df = df.assign(packet_count_decr=countarr)
df = df.assign(packet_count_incr=increarr)
df = df.assign(max_packet_count=maxarr)
return df
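# For a window containing k packets this assigns, row by row: packet_count_decr = k..1,
# packet_count_incr = 1..k, and max_packet_count = k for every row of that window.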
df = maxcount(timecount(df),df)
df
def rename(df):
df['traversal_id'] = df['traversal_id']+1
df["packet_count_index"] = df["packet_count_decr"]*df["packet_count_incr"]
df["packet_max_index"] = df["packet_count_index"]*df["max_packet_count"]
df["port_index"] = df["outport"]*df["inport"]
df["cache_coherence_flit_index"] = df["cache_coherence_type"]*df["flit_id"]
df["flit_index"] = df["cache_coherence_flit_index"]*df["flit_type"]
df["traversal_index"] = df["flit_index"]*df["traversal_id"]
df["cache_coherence_vnet_index"] = df["cache_coherence_type"]*df["vnet"]
df["vnet_vc_index"] = df["vnet"]*df["vc"]
df["vnet_vc_cc_index"] = df["vnet"]*df["cache_coherence_vnet_index"]
df.head(50)
rename(df)
df['target'] = 0
df
df.dtypes
df.to_csv('2-fft-malicious-n-0-3-m-14-r15.csv',index=False)
```
|
github_jupyter
|
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
#extracting lines for simplied verion
open('2-fft-malicious-n-0-3-m-14.txt','w').writelines([ line for line in open("2-fft-malicious-n-0-3-m-14.log") if "Enqueue" in line])
print ("done")
#extracting content from lines
csv_out = open('2-fft-malicious-n-0-3-m-14-csv.txt','w')
with open ('2-fft-malicious-n-0-3-m-14.txt', 'rt') as fft:
csv_out.write("time,router,outport,inport,packet_address,packet_type,flit_id,flit_type,vnet,vc,src_ni,src_router,dst_ni,dst_router,enq_time\n")
for line in fft:
line_split = line.split()
time = line_split[line_split.index("time:") + 1]
router = line_split[line_split.index("SwitchAllocator") + 3]
outport = line_split[line_split.index("outport") + 1]
inport = line_split[line_split.index("inport") + 1]
packet_address = line_split[line_split.index("addr") + 2][1:-1]
packet_type = line_split[line_split.index("addr") + 7]
flit_id = line_split[line_split.index("[flit::") + 1][3:]
flit_type = line_split[line_split.index("Id="+str(flit_id)) + 1][5:]
vnet = line_split[line_split.index("Type="+str(flit_type)) + 1][5:]
vc = line_split[line_split.index("Vnet="+str(vnet)) + 1][3:]
src_ni = line_split[line_split.index("VC="+str(vc)) + 2][3:]
src_router = line_split[line_split.index("NI="+str(src_ni)) + 2][7:]
dst_ni = line_split[line_split.index("Router="+str(src_router)) + 2][3:]
dst_router = line_split[line_split.index("NI="+str(dst_ni)) + 2][7:]
enq_time = str(line_split[line_split.index("Enqueue") + 1][5:])
line_csv = time+","+router+","+outport+","+inport+","+packet_address+","+packet_type+","+flit_id+","+flit_type+","+vnet+","+vc+","+src_ni+","+src_router+","+dst_ni+","+dst_router+","+enq_time+"\n"
csv_out.write(line_csv)
print ("done")
#convert txt to csv
df = pd.read_csv("2-fft-malicious-n-0-3-m-14-csv.txt",delimiter=',')
df.to_csv('2-fft-malicious-n-0-3-m-14.csv',index=False)
#dataset
df = pd.read_csv('2-fft-malicious-n-0-3-m-14.csv')
df.shape
df.describe()
sns.distplot(df['router'], kde = False, bins=30, color='blue')
sns.distplot(df['src_router'], kde = False, bins=30, color='blue')
sns.distplot(df['dst_router'], kde = False, bins=30, color='red')
sns.distplot(df['inport'], kde = False, bins=30, color='green')
sns.distplot(df['outport'], kde = False, bins=30, color='green')
sns.distplot(df['packet_type'], kde = False, bins=30, color='red')
direction = {'Local': 0,'North': 1, 'East': 2, 'South':3,'West':4}
df = df.replace({'inport': direction, 'outport': direction})
data = {'GETS': 1,'GETX': 2,'GUX': 3,'DATA': 4, 'PUTX': 5,'PUTS': 6,'WB_ACK':7}
df = df.replace({'packet_type': data})
df['flit_id'] = df['flit_id']+1
df['flit_type'] = df['flit_type']+1
df['vnet'] = df['vnet']+1
df['vc'] = df['vc']+1
hoparr = {"0to0":0,"0to1":1,"0to2":2,"0to3":3,"0to4":1,"0to5":2,"0to6":3,"0to7":4,"0to8":2,"0to9":3,"0to10":4,"0to11":5,"0to12":3,"0to13":4,"0to14":5,"0to15":6,
"1to1":0,"1to2":1,"1to3":2,"1to4":2,"1to5":1,"1to6":2,"1to7":3,"1to8":3,"1to9":2,"1to10":3,"1to11":4,"1to12":5,"1to13":3,"1to14":4,"1to15":5,
"2to2":0,"2to3":1,"2to4":3,"2to5":2,"2to6":1,"2to7":2,"2to8":4,"2to9":3,"2to10":2,"2to11":3,"2to12":5,"2to13":4,"2to14":3,"2to15":4,
"3to3":0,"3to4":4,"3to5":3,"3to6":2,"3to7":1,"3to8":5,"3to9":4,"3to10":3,"3to11":2,"3to12":6,"3to13":5,"3to14":4,"3to15":3,
"4to4":0,"4to5":1,"4to6":2,"4to7":3,"4to8":1,"4to9":2,"4to10":3,"4to11":4,"4to12":2,"4to13":3,"4to14":4,"4to15":5,
"5to5":0,"5to6":1,"5to7":2,"5to8":2,"5to9":1,"5to10":2,"5to11":3,"5to12":3,"5to13":2,"5to14":3,"5to15":4,
"6to6":0,"6to7":1,"6to8":3,"6to9":2,"6to10":1,"6to11":2,"6to12":4,"6to13":3,"6to14":2,"6to15":3,
"7to7":0,"7to8":4,"7to9":3,"7to10":2,"7to11":1,"7to12":5,"7to13":4,"7to14":3,"7to15":2,
"8to8":0,"8to9":1,"8to10":2,"8to11":3,"8to12":1,"8to13":2,"8to14":3,"8to15":4,
"9to9":0,"9to10":1,"9to11":2,"9to12":2,"9to13":1,"9to14":2,"9to15":4,
"10to10":0,"10to11":1,"10to12":3,"10to13":2,"10to14":1,"10to15":2,
"11to11":0,"11to12":4,"11to13":3,"11to14":2,"11to15":1,
"12to12":0,"12to13":1,"12to14":2,"12to15":3,
"13to13":0,"13to14":1,"13to15":2,
"14to14":0,"14to15":1,
"15to15":0}
packarr = {}
packtime = {}
packchunk = []
hopcurrentarr = []
hoptotarr = []
hoppercentarr =[]
waitingarr = []
interval = 500
count = 0
for index, row in df.iterrows():
current_time = row["time"]
enqueue_time = row["enq_time"]
waiting_time = current_time - enqueue_time
waitingarr.append(waiting_time)
current_router = row["router"]
src_router = row["src_router"]
dst_router = row["dst_router"]
src_router_temp = src_router
if src_router_temp>dst_router:
temph = src_router_temp
src_router_temp = dst_router
dst_router = temph
hop_count_string = str(src_router_temp)+"to"+str(dst_router)
src_router_temp = src_router
hop_count = hoparr.get(hop_count_string)
if src_router_temp>current_router:
tempc = src_router_temp
src_router_temp = current_router
current_router = tempc
current_hop_string = str(src_router_temp)+"to"+str(current_router)
current_hop = hoparr.get(current_hop_string)
if(current_hop == 0 and hop_count ==0):
hop_percent = 0
else:
hop_percent = current_hop/hop_count
hoptotarr.append(hop_count)
hopcurrentarr.append(current_hop)
hoppercentarr.append(hop_percent)
if row["packet_address"] not in packarr:
packarr[row["packet_address"]] = count
packtime[row["packet_address"]] = row["time"]
packchunk.append(packarr.get(row["packet_address"]))
count+=1
else:
current_time = row["time"]
position = packarr.get(row["packet_address"])
pkt_time = packtime.get(row["packet_address"])
current_max = max(packarr.values())
if (current_time-pkt_time)<interval:
packchunk.append(packarr.get(row["packet_address"]))
else:
del packarr[row["packet_address"]]
del packtime[row["packet_address"]]
packarr[row["packet_address"]] = current_max+1
packtime[row["packet_address"]] = row["time"]
packchunk.append(packarr.get(row["packet_address"]))
if (current_max)==count:
count+=2
elif (current_max+1)==count:
count+=1
df['packet_address'].nunique()
print(len(packarr))
print(len(packchunk))
df = df.assign(traversal_id=packchunk)
df = df.assign(hop_count=hoptotarr)
df = df.assign(current_hop=hopcurrentarr)
df = df.assign(hop_percentage=hoppercentarr)
df = df.assign(enqueue_time=waitingarr)
df.rename(columns={'packet_type': 'cache_coherence_type', 'time': 'timestamp'}, inplace=True)
df = df.drop(columns=['packet_address','enq_time'])
df.isnull().sum()
df.dtypes
df.to_csv('2-fft-malicious-n-0-3-m-14.csv',index=False)
df = pd.read_csv('2-fft-malicious-n-0-3-m-14.csv')
df.shape
df = df.loc[df['router'] == 15]
df = df.drop(columns=['router'])
df.to_csv('2-fft-malicious-n-0-3-m-14-r15.csv',index=False)
df = pd.read_csv('2-fft-malicious-n-0-3-m-14-r15.csv')
def timecount(df):
timearr = []
interval = 99
count = 0
for index, row in df.iterrows():
if row["timestamp"]<=interval:
count+=1
else:
timearr.append([interval+1,count])
count=1
interval+=100
timearr.append([interval+1,count])
return timearr
def maxcount(timearr,df):
countarr = []
increarr = []
maxarr = []
for i in range(len(timearr)):
for cnt in range(timearr[i][1],0,-1):
countarr.append(cnt)
maxarr.append(timearr[i][1])
increment = timearr[i][1] - cnt + 1
increarr.append(increment)
df = df.assign(packet_count_decr=countarr)
df = df.assign(packet_count_incr=increarr)
df = df.assign(max_packet_count=maxarr)
return df
df = maxcount(timecount(df),df)
df
def rename(df):
df['traversal_id'] = df['traversal_id']+1
df["packet_count_index"] = df["packet_count_decr"]*df["packet_count_incr"]
df["packet_max_index"] = df["packet_count_index"]*df["max_packet_count"]
df["port_index"] = df["outport"]*df["inport"]
df["cache_coherence_flit_index"] = df["cache_coherence_type"]*df["flit_id"]
df["flit_index"] = df["cache_coherence_flit_index"]*df["flit_type"]
df["traversal_index"] = df["flit_index"]*df["traversal_id"]
df["cache_coherence_vnet_index"] = df["cache_coherence_type"]*df["vnet"]
df["vnet_vc_index"] = df["vnet"]*df["vc"]
df["vnet_vc_cc_index"] = df["vnet"]*df["cache_coherence_vnet_index"]
df.head(50)
rename(df)
df['target'] = 0
df
df.dtypes
df.to_csv('2-fft-malicious-n-0-3-m-14-r15.csv',index=False)
# Notebook for PAN - Authorship Attribution - 2018
```
%matplotlib inline
#python basic libs
from __future__ import print_function
from tempfile import mkdtemp
from shutil import rmtree
import os;
from os.path import join as pathjoin;
import re;
import glob;
import json;
import codecs;
from collections import defaultdict;
import pprint;
import warnings;
from pprint import pprint
from time import time
import logging
#data analysis libs
import numpy as np;
import pandas as pd;
import seaborn as sn;
import matplotlib.pyplot as plt;
import random;
#machine learning libs
#feature extraction
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
#preprocessing and transformation
from sklearn.preprocessing import normalize, MaxAbsScaler, MinMaxScaler;
from sklearn.preprocessing import LabelBinarizer, LabelEncoder;
from sklearn.decomposition import PCA;
from sklearn.metrics.pairwise import cosine_similarity;
from sklearn.base import BaseEstimator, ClassifierMixin
#classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
#model valuation
from sklearn.model_selection import train_test_split;
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score, accuracy_score;
import seaborn as sns;
sns.set(color_codes=True);
from pandas.plotting import scatter_matrix
import platform; print(platform.platform())
print("NumPy", np.__version__)
import scipy; print("SciPy", scipy.__version__)
import sklearn; print("Scikit-Learn", sklearn.__version__)
```
### paths configuration
```
baseDir = '/Users/joseeleandrocustodio/Dropbox/mestrado/02 - Pesquisa/code';
inputDir= pathjoin(baseDir,'pan18aa');
outputDir= pathjoin(baseDir,'out',"oficial");
if not os.path.exists(outputDir):
os.mkdir(outputDir);
```
## loading the dataset
```
def readCollectionsOfProblems(path):
# Reading information about the collection
infocollection = path+os.sep+'collection-info.json'
with open(infocollection, 'r') as f:
problems = [
{
'problem': attrib['problem-name'],
'language': attrib['language'],
'encoding': attrib['encoding'],
}
for attrib in json.load(f)
]
return problems;
def readProblem(path, problem):
# Reading information about the problem
infoproblem = path+os.sep+problem+os.sep+'problem-info.json'
candidates = []
with open(infoproblem, 'r') as f:
fj = json.load(f)
unk_folder = fj['unknown-folder']
for attrib in fj['candidate-authors']:
candidates.append(attrib['author-name'])
return unk_folder, candidates;
def read_files(path,label):
# Reads all text files located in the 'path' and assigns them to 'label' class
files = glob.glob(pathjoin(path,label,'*.txt'))
texts=[]
for i,v in enumerate(files):
f=codecs.open(v,'r',encoding='utf-8')
texts.append((f.read(),label, os.path.basename(v)))
f.close()
return texts
problems = readCollectionsOfProblems(inputDir);
for index,problem in enumerate(problems):
unk_folder, candidates_folder = readProblem(inputDir, problem['problem']);
problem['candidates_folder_count'] = len(candidates_folder);
problem['candidates'] = [];
for candidate in candidates_folder:
problem['candidates'].extend(read_files(pathjoin(inputDir, problem['problem']),candidate));
problem['unknown'] = read_files(pathjoin(inputDir, problem['problem']),unk_folder);
pd.DataFrame(problems)
#*******************************************************************************************************
def eval_measures(gt, pred):
"""Compute macro-averaged F1-scores, macro-averaged precision,
    macro-averaged recall, and micro-averaged accuracy according to the ad hoc
rules discussed at the top of this file.
Parameters
----------
gt : dict
Ground truth, where keys indicate text file names
(e.g. `unknown00002.txt`), and values represent
author labels (e.g. `candidate00003`)
pred : dict
Predicted attribution, where keys indicate text file names
(e.g. `unknown00002.txt`), and values represent
author labels (e.g. `candidate00003`)
Returns
-------
f1 : float
Macro-averaged F1-score
precision : float
Macro-averaged precision
recall : float
Macro-averaged recall
accuracy : float
Micro-averaged F1-score
"""
actual_authors = list(gt.values())
encoder = LabelEncoder().fit(['<UNK>'] + actual_authors)
text_ids, gold_authors, silver_authors = [], [], []
for text_id in sorted(gt):
text_ids.append(text_id)
gold_authors.append(gt[text_id])
try:
silver_authors.append(pred[text_id])
except KeyError:
# missing attributions get <UNK>:
silver_authors.append('<UNK>')
assert len(text_ids) == len(gold_authors)
assert len(text_ids) == len(silver_authors)
# replace non-existent silver authors with '<UNK>':
silver_authors = [a if a in encoder.classes_ else '<UNK>'
for a in silver_authors]
gold_author_ints = encoder.transform(gold_authors)
silver_author_ints = encoder.transform(silver_authors)
# get F1 for individual classes (and suppress warnings):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
f1 = f1_score(gold_author_ints,
silver_author_ints,
labels=list(set(gold_author_ints)),
average='macro')
precision = precision_score(gold_author_ints,
silver_author_ints,
labels=list(set(gold_author_ints)),
average='macro')
recall = recall_score(gold_author_ints,
silver_author_ints,
labels=list(set(gold_author_ints)),
average='macro')
accuracy = accuracy_score(gold_author_ints,
silver_author_ints)
return f1,precision,recall,accuracy
def evaluate(ground_truth_file,predictions_file):
# Calculates evaluation measures for a single attribution problem
gt = {}
with open(ground_truth_file, 'r') as f:
for attrib in json.load(f)['ground_truth']:
gt[attrib['unknown-text']] = attrib['true-author']
pred = {}
with open(predictions_file, 'r') as f:
for attrib in json.load(f):
if attrib['unknown-text'] not in pred:
pred[attrib['unknown-text']] = attrib['predicted-author']
f1,precision,recall,accuracy = eval_measures(gt,pred)
return f1, precision, recall, accuracy
from sklearn.base import BaseEstimator
from scipy.sparse import issparse
class DenseTransformer(BaseEstimator):
"""Convert a sparse array into a dense array."""
def __init__(self, return_copy=True):
self.return_copy = return_copy
self.is_fitted = False
def transform(self, X, y=None):
if issparse(X):
return X.toarray()
elif self.return_copy:
return X.copy()
else:
return X
def fit(self, X, y=None):
self.is_fitted = True
return self
def fit_transform(self, X, y=None):
return self.transform(X=X, y=y)
def runML(problem):
print ("\nProblem: %s, language: %s, " %(problem['problem'],problem['language']))
train_docs, train_labels, _ = zip(*problem['candidates'])
problem['training_docs_size'] = len(train_docs);
test_docs, _, test_filename = zip(*problem['unknown'])
pipeline = Pipeline([
('vect', TfidfVectorizer(analyzer='word',
norm='l1',
max_df=1.0,
ngram_range=(1,3),
lowercase =True,
sublinear_tf=True)),
('dense', DenseTransformer()),
('scaler', MaxAbsScaler()),
('transf', PCA(0.9999)),
('clf', LogisticRegression(random_state=0,multi_class='multinomial', solver='newton-cg')),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__min_df':(2,0.01,0.05,0.1)
}
grid_search = GridSearchCV(pipeline,
parameters,
cv=5,
scoring='f1_macro',
n_jobs=-1,
verbose=False
)
print("Performing grid search...")
t0 = time()
grid_search.fit(train_docs, train_labels)
print("done in %0.3fs" % (time() - t0))
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
train_pred=grid_search.predict(train_docs);
test_pred=grid_search.predict(test_docs);
# Writing output file
out_data=[]
for i,v in enumerate(test_pred):
out_data.append({'unknown-text': test_filename[i],'predicted-author': v})
answerFile = pathjoin(outputDir,'answers-'+problem['problem']+'.json');
with open(answerFile, 'w') as f:
json.dump(out_data, f, indent=4)
#allProblems.extend(out_data)
#evaluation train
f1,precision,recall,accuracy=evaluate(
pathjoin(inputDir, problem['problem'], 'ground-truth.json'),
answerFile)
return {
'problem-name' : problem['problem'],
"language" : problem['language'],
'AuthorCount' : len(set(train_labels)),
"train_doc_size": len(train_docs),
"train_caract_per_doc": sum([len(l) for l in train_docs])/len(train_docs),
"test_doc_size" : len(test_docs),
"test_caract_per_doc": sum([len(l) for l in test_docs])/len(test_docs),
'macro-f1' : round(f1,3),
'macro-precision': round(precision,3),
'macro-recall' : round(recall,3),
'micro-accuracy' : round(accuracy,3),
}, grid_search.cv_results_, best_parameters;
```
### examining the min_df parameter in isolation
```
result = [];
cv_result = [];
best_parameters = [];
for problem in problems:
r, c, b = runML(problem);
result.append(r);
cv_result.append(c);
b['problem'] = problem['problem'];
best_parameters.append(b);
pd.DataFrame(best_parameters)[['problem','vect__min_df']]
```
### analyzing the remaining parameters
```
def runML(problem):
print ("\nProblem: %s, language: %s, " %(problem['problem'],problem['language']))
train_docs, train_labels, _ = zip(*problem['candidates'])
problem['training_docs_size'] = len(train_docs);
test_docs, _, test_filename = zip(*problem['unknown'])
pipeline = Pipeline([
('vect', TfidfVectorizer(analyzer='word',
norm='l1',
min_df=2,
max_df=1.0,
smooth_idf=True,
lowercase =True,
sublinear_tf=True)),
('dense', DenseTransformer()),
('scaler', MaxAbsScaler()),
('transf', PCA()),
('clf', LogisticRegression(random_state=0,multi_class='multinomial', solver='newton-cg')),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__ngram_range':((1,1),(1,2),(1,3)),
'vect__sublinear_tf':(True, False),
'vect__norm':('l1','l2',None),
'transf__n_components': (0.1,0.25,0.5,0.75,0.9,0.999),
}
grid_search = GridSearchCV(pipeline,
parameters,
cv=5,
scoring='f1_macro',
n_jobs=-1,
verbose=False
)
print("Performing grid search...")
t0 = time()
grid_search.fit(train_docs, train_labels)
print("done in %0.3fs" % (time() - t0))
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
train_pred=grid_search.predict(train_docs);
test_pred=grid_search.predict(test_docs);
# Writing output file
out_data=[]
for i,v in enumerate(test_pred):
out_data.append({'unknown-text': test_filename[i],'predicted-author': v})
answerFile = pathjoin(outputDir,'answers-'+problem['problem']+'.json');
with open(answerFile, 'w') as f:
json.dump(out_data, f, indent=4)
#allProblems.extend(out_data)
#evaluation train
f1,precision,recall,accuracy=evaluate(
pathjoin(inputDir, problem['problem'], 'ground-truth.json'),
answerFile)
return {
'problem-name' : problem['problem'],
"language" : problem['language'],
'AuthorCount' : len(set(train_labels)),
"train_doc_size": len(train_docs),
"train_caract_per_doc": sum([len(l) for l in train_docs])/len(train_docs),
"test_doc_size" : len(test_docs),
"test_caract_per_doc": sum([len(l) for l in test_docs])/len(test_docs),
'macro-f1' : round(f1,3),
'macro-precision': round(precision,3),
'macro-recall' : round(recall,3),
'micro-accuracy' : round(accuracy,3),
}, grid_search.cv_results_, best_parameters;
result = [];
cv_result = [];
best_parameters = [];
for problem in problems:
r, c, b = runML(problem);
result.append(r);
cv_result.append(c);
b['problem'] = problem['problem'];
best_parameters.append(b);
df=pd.DataFrame(result)[['problem-name',
"language",
'AuthorCount',
"train_doc_size","train_caract_per_doc",
"test_doc_size", "test_caract_per_doc",
'macro-f1','macro-precision','macro-recall' ,'micro-accuracy']]
df
print(df[["macro-f1"]].reset_index().to_latex(index=False).replace(" "," "))
pd.DataFrame(result)[['macro-f1']].describe()
languages={
'en':'inglesa',
'sp':'espanhola',
'it':'italiana',
'pl':'polonesa',
'fr':'francesa'
}
cv_result2 = [];
dfCV = pd.DataFrame();
for i, c in enumerate(cv_result):
temp = pd.DataFrame(c);
temp['problem'] = i+1;
temp['language'] = languages[problems[i]['language']]
dfCV = dfCV.append(temp);
for p in ['param_transf__n_components',
'mean_test_score','std_test_score','mean_train_score',
'split0_test_score','split0_train_score',
'split1_test_score','split1_train_score',
'split2_test_score','split2_train_score',
'split3_test_score','split3_train_score',
'split4_test_score','split4_train_score']:
dfCV[p]=dfCV[p].astype(np.float32);
dfCV =dfCV[[
'problem',
'language',
'rank_test_score',
'param_transf__n_components',
'param_vect__ngram_range',
'param_vect__sublinear_tf',
'param_vect__norm',
'mean_test_score',
'std_test_score',
'mean_train_score',
'split0_test_score','split0_train_score',
'split1_test_score','split1_train_score',
'split2_test_score','split2_train_score',
'split3_test_score','split3_train_score',
'split4_test_score','split4_train_score',
'mean_score_time',
'mean_fit_time',
'std_fit_time',
'std_score_time',
'std_train_score',
]];
dfCV.rename(columns={
'param_transf__n_components':'PCA_componentes',
'param_vect__ngram_range':'ngram_range',
'param_vect__sublinear_tf':'sublinear_tf',
'param_vect__smooth_idf':'smooth_idf',
'param_vect__norm':'norm'
},inplace=True);
#print('\',\n\''.join(dfCV.columns))
dfCV.to_csv('PANAA2018_WORD.csv', index=False)
dfCV = pd.read_csv('PANAA2018_WORD.csv')
dfCV.head()
(dfCV[dfCV.rank_test_score == 1])[
['problem',
'language',
'rank_test_score',
'mean_test_score',
'std_test_score',
'ngram_range',
'sublinear_tf',
'PCA_componentes']
].sort_values(by=[
'problem',
'mean_test_score',
'ngram_range',
'sublinear_tf',
'PCA_componentes'
], ascending=[True, False,False,False,False])
dfCV.pivot_table(
index=['problem','language','PCA_componentes'],
columns=['norm','sublinear_tf', 'ngram_range'],
values='mean_test_score'
)
pd.options.display.precision = 3
print(u"\\begin{table}[h]\n\\centering\n\\caption{Medida F1 para os parâmetros }")
print(re.sub(r'[ ]{2,}',' ',dfCV[dfCV.PCA_componentes >= 0.999].pivot_table(
index=['problem','language','sublinear_tf','norm'],
columns=['ngram_range'],
values='mean_test_score'
).to_latex()))
print ("\label{tab:modelocaracter}")
print(r"\end{table}")
d = dfCV[dfCV.PCA_componentes > 0.9].rename(columns={'language':u'Língua', 'sublinear_tf':'TF Sublinear'})
d = d [ d.norm.isna() == False]
d['autorNumber'] = d.problem.map(lambda x: 20 if x % 2==0 else 5)
d.problem = d.apply(lambda x: x[u'Língua'] +" "+ str(x[u'problem']), axis=1)
d.std_test_score =d.std_test_score / d.std_test_score.quantile(0.95) *500;
d.std_test_score +=1;
d.std_test_score = d.std_test_score.astype(np.int64)
g = sns.FacetGrid(d, row='problem', hue='TF Sublinear', col="norm", size=3,palette="Set1")
g.map(plt.scatter, "ngram_range", "mean_test_score", alpha=0.5, s=d.std_test_score.values).add_legend();
g = sns.FacetGrid(d, row='autorNumber', hue='TF Sublinear', col=u"Língua", size=3,palette="Set1")
g.map(plt.scatter, "ngram_range", "mean_test_score", alpha=0.5, s=d.std_test_score.values).add_legend();
import statsmodels.api as sm
d = dfCV[['mean_test_score','problem', 'language','sublinear_tf','norm','ngram_range','PCA_componentes']].copy();
d.sublinear_tf=d.sublinear_tf.apply(lambda x: 1 if x else 0)
d['autorNumber'] = d.problem.map(lambda x: 20 if x % 2==0 else 5)
d.norm.fillna(value='None', inplace=True);
d.PCA_componentes = np.log(d.PCA_componentes);
_, d['ngram_max'] = zip(*d.ngram_range.str.replace(r'[^\d,]','').str.split(',').values.tolist())
#d.ngram_min = d.ngram_min.astype(np.uint8);
d.ngram_max = d.ngram_max.astype(np.uint8);
d.drop(columns=['ngram_range','problem'], inplace=True)
#d['intercept'] = 1;
d=pd.get_dummies(d, columns=['language', 'norm','ngram_max'])
d.describe()
mod = sm.OLS( d.iloc[:,0], d.iloc[:,1:])
res = mod.fit()
res.summary()
sns.distplot(res.predict()-d.iloc[:,0].values, bins=25)
sns.jointplot(x='F1',y='F1-estimated',data=pd.DataFrame({'F1':d.iloc[:,0].values, 'F1-estimated':res.predict()}));
```
<br/><br/><br/><br/><br/>
# Challenger approach 1
```
from gensim.models import Word2Vec;
class NgramSplitter(object):
def __init__(self, text, ngram=(3,3), vocabulary=None):
self.text = text
self.ngram_min = ngram[0]
self.ngram_max = ngram[1];
self.vocabulary = vocabulary;
def text2ngrams(self,text):
vect = [
text[t:t+j]
for t in xrange(len(text)-self.ngram_max+1)
for j in xrange(self.ngram_min, self.ngram_max+1)
]
if self.vocabulary is not None:
return [word for word in vect if word in self.vocabulary];
else:
return [word for word in vect if word]
def __iter__(self):
if isinstance(self.text,list):
for s in self.text:
yield self.text2ngrams(s);
elif isinstance(self.text,str) or isinstance(self.text,unicode):
yield self.text2ngrams(self.text);
class Word2VecClassifier(BaseEstimator, ClassifierMixin):
"""A classifier that uses classes embeddings to classify instances"""
def __init__(
self,
ngram = (3,4),
analyzer = 'char',
min_df = 0.3,
max_df = 1.0,
min_count =2,
embeddingSize =750,
window=10,
algorithm = 0,
iter =10
):
"""
Called when initializing the classifier
"""
self.algorithm = algorithm
self.min_count = min_count
self.embeddingSize = embeddingSize
self.window = window
self.iter = iter
self.analyzer = analyzer
self.vocabulary_ = {}
self.ngram = ngram
self.min_df = min_df
self.max_df = max_df
def _buildVectorModel(self, document):
sentenseGenerator = NgramSplitter(document,self.ngram, self.vocabulary_);
model = Word2Vec(
sentenseGenerator,
sg = self.algorithm,
iter = self.iter,
min_count= self.min_count,
window = self.window,
size = self.embeddingSize,
seed=0
);
return model.wv;
def fit(self, X, y=None):
"""
        Summarize one text per label and transform the text into word vectors
"""
#creating author profile
profile = defaultdict(unicode);
for text, label in zip(X,y):
profile[label]+=text;
        #build a global vocabulary / using CountVectorizer to create a fixed vocabulary
vectorizer = CountVectorizer(
analyzer=self.analyzer,
ngram_range=self.ngram,
min_df=self.min_df,
max_df=self.max_df,
lowercase=False
)
vectorizer.fit(X);
self.vocabulary_ = vectorizer.vocabulary_
# profile vector represent each author in the embedding space
self.profileVectors_ = {y: self._buildVectorModel(profile[y]) for y in y};
return self
def _minmax(self, a):
a = (a - a.min())/(a.max() - a.min());
return a;
def _simpleCosine(self,a, b):
'''
        calculates the cosine similarity between arrays a and b.
        This function is used because sklearn's similarity functions compare all elements
        against all elements, which is not needed here; this simpler version is handier.
'''
a = a / np.sqrt(np.sum(a **2));
b = b / np.sqrt(np.sum(b **2));
cos = np.sum(np.array(a) * np.array(b));
return cos;
def _KLD(self,p, q):
p = self._minmax(p); p = p/p.sum();
q = self._minmax(q); q = q/q.sum();
cond = ((q != 0)&(p != 0));
k1 = np.sum(np.where(cond, p * np.log(p / q), 0));
return k1;
def _manhattan(self,p, q):
p = self._minmax(p); p = p/p.sum();
q = self._minmax(q); q = q/q.sum();
return np.mean(np.abs(p-q));
def _guassian(self, C,D):
cond = C-D !=0;
bc = np.where(cond,(C-D+1)**2/(2*np.maximum(C,D+1)),1);
return np.sum(-np.log(bc));
def score(self, X, y=None):
# counts number of values bigger than mean
return(sum(self.predict(X)))
def _softMax(self,a):
a = self._minmax(a);
a = np.exp(a)/np.sum(np.exp(a))
return a;
def _predict1Doc(self, docVect):
vocabDoc = set(docVect.vocab.keys());
metrics = [];
def c(aa,bb, funct):
voc = set(aa.vocab.keys()) & set(bb.vocab.keys())
f = np.array([
funct(aa[v], bb[v])
for v in voc
]);
f = np.sum(f)
return f;
for label in self.profileVectors_:
labelVocab = set(self.profileVectors_[label].vocab.keys());
intersect = vocabDoc & labelVocab;
union = len(vocabDoc | labelVocab);
jaccard = 1.0*len(intersect) / union;
metrics.append({
'label' : label,
'jaccard' : jaccard,
'lenIntersect': len(intersect),
'lenUnion' : union,
'lenMax' : max(len(labelVocab), len(vocabDoc)),
'similarity' : c(docVect, self.profileVectors_[label], self._simpleCosine),
'KLD' : c(docVect, self.profileVectors_[label], self._KLD),
'manhattan' : c(docVect, self.profileVectors_[label], self._manhattan),
'guassian' : c(docVect, self.profileVectors_[label], self._guassian),
})
#softmax norm
similarity = self._softMax(np.array([c['similarity'] for c in metrics ]));
guassian = self._softMax(np.array([c['guassian'] for c in metrics ]));
manhattan = self._softMax(np.array([c['manhattan'] for c in metrics ]));
#appending normalized sum of distance
for i,c in enumerate(metrics):
c.update({
'similarityNorm': similarity[i],
'guassianNorm': guassian[i],
'manhattanNorm': manhattan[i]
})
return metrics;
def predict(self, X, y=None):
try:
getattr(self, "profileVectors_")
except AttributeError:
raise RuntimeError("You must train classifer before predicting data!")
docVectors = [self._buildVectorModel(x) for x in X];
self.metrics_ = [self._predict1Doc(v) for v in docVectors];
result = [];
for r in self.metrics_:
best = r[0];
best['bestMatch'] = True;
for rr in r:
if rr != best:
rr['bestMatch'] = False;
if rr['similarityNorm'] > best['similarityNorm'] :
best['bestMatch'] = False;
best = rr;
best['bestMatch'] = True;
result.append(best);
self.predited_ = result;
return([r['label'] for r in result])
problem = problems[8];
print ("Problem: %s, language: %s, " %(problem['problem'],problem['language']))
model = Word2VecClassifier();
train_docs, train_labels,_ = zip(*problem['candidates']);
model.fit(train_docs,train_labels);
trainPred = model.predict(train_docs);
trainMetrics = model.metrics_;
df=pd.DataFrame(zip(train_labels,trainPred), columns=['label','pred'])
df.label = df.label.apply(lambda x: int(re.sub(r'\D','',x)));
df.pred = df.pred.apply(lambda x: int(re.sub(r'\D','',x)));
df.plot.scatter(x='label',y='pred');
m = trainMetrics
df = pd.DataFrame([item for s in m for item in s])
df['doc'] = [i for i,s in enumerate(m) for item in s]
df['solution'] = [train_labels[i] for i,s in enumerate(m) for item in s]
df.sort_values(by=['doc','similarityNorm', 'manhattan'], ascending=[True,False,True], inplace=True)
df['distance'] = [i for i in range(len(set(train_labels)))]* len(trainMetrics)
df[df.doc == 55]
df2 = df[df.bestMatch].copy();
df2['correct'] = df2.apply(lambda x: x['label'] == x['solution'], axis=1)
df2[['correct','doc']].groupby(by='correct').count()
model.get_params()
df2 = df[df.bestMatch].copy();
df2['correct'] = df2.apply(lambda x: x['label'] == x['solution'], axis=1)
df2[['correct','doc']].groupby(by='correct').count()
model.get_params()
df[df.solution == df.label].plot.scatter(x='distance', y='manhattanNorm')
df[df.solution == df.label].plot.scatter(x='distance', y='guassianNorm')
df[df.solution == df.label].plot.scatter(x='distance', y='similarityNorm')
df[df.solution == df.label].plot.scatter(x='manhattanNorm', y='guassianNorm', c='distance',colormap='Reds')
```
### test
```
#code from baseline
gt = {}
with open(pathjoin(inputDir, problem['problem'], 'ground-truth.json'), 'r') as f:
for attrib in json.load(f)['ground_truth']:
gt[attrib['unknown-text']] = attrib['true-author']
test_docs, _, test_filename = zip(*problem['unknown'])
test_labels = [gt[v] for v in test_filename]
testPred = model.predict(test_docs);
testMetrics = model.metrics_;
m = testMetrics
df = pd.DataFrame([item for s in m for item in s])
df['doc'] = [i for i,s in enumerate(m) for item in s]
df['solution'] = [train_labels[i] for i,s in enumerate(m) for item in s]
df.sort_values(by=['doc','similarityNorm', 'KLD'], ascending=[True,False,True], inplace=True)
df['distance'] = [i for i in range(len(set(train_labels)))]* len(testMetrics)
df[df.doc == 55]
f1,precision,recall,accuracy = eval_measures(gt,{k: v for k,v in zip(test_filename, testPred) })
pd.DataFrame([{
'macro-f1' : round(f1,3),
'macro-precision': round(precision,3),
'macro-recall' : round(recall,3),
'micro-accuracy' : round(accuracy,3)
}])
df2 = df[df.bestMatch].copy();
df2['correct'] = df2.apply(lambda x: x['label'] == x['solution'], axis=1)
df2[['correct','doc']].groupby(by='correct').count()
df[df.solution == df.label].plot.scatter(x='distance', y='guassianNorm')
df[df.solution == df.label].plot.scatter(x='distance', y='manhattanNorm')
df[df.solution == df.label].plot.scatter(x='distance', y='similarityNorm')
df[df.solution == df.label]\
.plot\
.scatter(
x='guassianNorm',
y='similarityNorm',
c='distance',
colormap='Reds',
figsize=(20,5));
```
# TextRank
## TL;DR;
The author extends the idea of PageRank:
$$S(V_{i}) = (1-d) + d* \sum_{V_{j}\in In(V_{i})}\frac{1}{\vert Out(V_{j}) \vert}S(V_{j})$$
by introducing weights for updating the score of each vertex:
$$WS(V_{i}) = (1-d) + d* \sum_{V_{j}\in In(V_{i})}\frac{w_{ji}}{\sum_{V_{k}\in Out(V_{j})}w_{jk}}WS(V_{j})$$
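With uniform weights ($w_{ji}=1$ for every edge), the denominator $\sum_{V_{k}\in Out(V_{j})}w_{jk}$ equals $\vert Out(V_{j}) \vert$, so the weighted update reduces to the unweighted score above.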
## Example of TextRank for Key Word Extraction
For keyword extraction, simply use words as vertices and word co-occurrence within a sliding window to define the edges.
```
import numpy as np
import pandas as pd
import itertools
```
Example from [TextRank:Bringing Order into Texts](https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf)
```
example_text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds forcomponents of a minimal set of solutions and algorithms of construction ofminimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types systems and systems of mixed types."
example_text
```
Apply naive white space tokenization.
```
def tokenizer(text):
tokens = []
    for word in map(lambda s: s.lower(), text.split(" ")):
if word[-1] in ",.":
tokens += word[:-1], word[-1]
else:
tokens.append(word)
return tokens
tokens = pd.Series(tokenizer(example_text))
tokens
```
Apply syntactic filters & build graph. In this case we don't apply any filter at all.
```
def dummy_filter(token_a, token_b):
return True
def construct_graph(tokens, synatic_filter, window_width=5):
vocab = tokens.unique()
mat = pd.DataFrame(index=vocab, columns=vocab)
for window_start in range(len(tokens) - window_width + 1):
window = tokens[window_start:window_start+window_width]
for token_a, token_b in itertools.combinations(window, 2):
if synatic_filter(token_a, token_b):
mat.loc[token_a][token_b] = 1.
mat.loc[token_b][token_a] = 1.
mat.fillna(0., inplace=True)
# Remove isolated vertices
deg = mat.values.sum(axis=1)
new_indices =mat.index[deg > 0]
mat = mat.loc[new_indices, new_indices]
return mat
adj_mat = construct_graph(tokens, dummy_filter)
adj_mat
```
Calculate the score of each vertex.
```
def text_rank(adj_mat, d, threshold=1e-5, max_iter=100):
assert 0 < d < 1
vertices = adj_mat.index
cur_scores = pd.Series(index=vertices, dtype=float).fillna(1)
deg_o = adj_mat.sum(axis=1)
norm_adj_mat = adj_mat.div(deg_o, axis=1)
for _ in range(max_iter):
update = norm_adj_mat.mul(cur_scores, axis=1).sum(axis=1)
new_scores = (1 - d) + d * update
if np.linalg.norm(new_scores - cur_scores) < threshold:
return new_scores
cur_scores = new_scores
return cur_scores
text_rank(adj_mat,d=0.85, max_iter=100).sort_values(ascending=False)
```
# Data description & Problem statement:
The data concerns city-cycle fuel consumption in miles per gallon, to be predicted in terms of 3 multivalued discrete and 5 continuous attributes. Please check the description at: https://archive.ics.uci.edu/ml/datasets/auto+mpg
* The data has 398 rows and 9 variables.
* This is a Regression problem. We predict the Auto MPG.
# Workflow:
- Load the dataset, and define the required functions (e.g. for detecting outliers; a minimal sketch is given after this list)
- Data cleaning/wrangling: handle outliers, missing data, or duplicate values; encode categorical variables, etc.
- Split data into training & test parts (use the training part for training & hyperparameter tuning of the model, and the test part for its final evaluation)
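The outlier-detection helper mentioned above is not shown in this excerpt; a minimal IQR-based sketch (the name `detect_outliers_iqr` and the 1.5×IQR threshold are illustrative assumptions, not the notebook's actual implementation) could look like this:
```
import pandas as pd

def detect_outliers_iqr(series, k=1.5):
    # Flag values outside [Q1 - k*IQR, Q3 + k*IQR]
    q1, q3 = series.quantile(0.25), series.quantile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - k * iqr, q3 + k * iqr
    return series[(series < lower) | (series > upper)].index

# e.g. rows with unusually high or low horsepower:
# outlier_idx = detect_outliers_iqr(df['horsepower'])
```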
# Model Training:
- Build an initial RF model, and evaluate it via a cross-validation (C-V) approach
- Use grid search along with the C-V approach to find the best hyperparameters, i.e., the best RF model
# Model Evaluation:
- Evaluate the best RF model with optimized hyperparameters on the test dataset, by calculating:
- r2 (determination factor)
- Lift chart
- RMSE
- Box-plot for prediction Vs. actual values
- Distribution plot of error/residuals
Finally, calculate the feature importances.
```
import sklearn
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
%matplotlib inline
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
df=pd.read_csv('C:/Users/rhash/Documents/Datasets/mpg/mpg.csv')
# To Shuffle the data:
np.random.seed(42)
df=df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
df.info()
df['horsepower'] = df['horsepower'].apply(pd.to_numeric, errors='coerce')
df.dropna(inplace=True)
df.drop(['name' ], axis=1, inplace=True)
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = "{}-{}".format(name, x)
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
for name in ['origin']:
encode_text_dummy(df, name)
df.head(5)
df.info()
X=df.drop(['mpg'], axis=1)
y=np.log(df['mpg'])
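# Log-transform the target to reduce skew; predictions are mapped back with np.e** before computing RMSE.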
# Re-scaling:
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures
# We initially divide the data into training & test folds: we do the grid search only on the training part
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
scaler_X=MinMaxScaler().fit(pd.DataFrame(X_train))
X_train=scaler_X.transform(pd.DataFrame(X_train))
X_test=scaler_X.transform(pd.DataFrame(X_test))
scaler_y=MinMaxScaler().fit(pd.DataFrame(y_train))
y_train=scaler_y.transform(pd.DataFrame(y_train))
y_test=scaler_y.transform(pd.DataFrame(y_test))
# We build the Initial Model & Cross-Validation:
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
model=RandomForestRegressor(max_features=9, n_estimators=200, max_depth=7, random_state=42)
kfold=KFold(n_splits=5, shuffle=True, random_state=42)
scores=cross_val_score(model, X_train, y_train, cv=kfold)
print(scores, "\n")
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
# Grid-Serach for the best model parameters:
from sklearn.model_selection import GridSearchCV
param={'max_depth':[2, 3, 5, 7, 10, 15], 'max_features':[2, 3, 5, 7, 9], 'n_estimators': [10, 50, 200]}
kfold=KFold(n_splits=4, shuffle=True, random_state=42)
grid_search=GridSearchCV(RandomForestRegressor(random_state=42), param, cv=kfold, n_jobs=-1, scoring='r2')
grid_search.fit(X_train, y_train)
G=pd.DataFrame(grid_search.cv_results_)
G.sort_values("rank_test_score").head(3)
print("Best parameters: ", grid_search.best_params_)
print("Best validation accuracy: %0.2f (+/- %0.2f)" % (np.round(grid_search.best_score_, decimals=2), np.round(G.loc[grid_search.best_index_,"std_test_score" ], decimals=2)))
print("Test score: ", np.round(grid_search.score(X_test, y_test),2))
# Feature Importance:
im=RandomForestRegressor( max_depth= 5, max_features= 5, n_estimators= 25, random_state=42).fit(X,y).feature_importances_
# Sort & Plot:
d=dict(zip(X.columns, im))
k=sorted(d,key=lambda i: d[i], reverse= True)
[print((i,d[i])) for i in k]
# Plot:
c1=pd.DataFrame(np.array(im), columns=["Importance"])
c2=pd.DataFrame(X.columns, columns=["Feature"])
fig, ax = plt.subplots(figsize=(8,6))
sns.barplot(x="Feature", y="Importance", data=pd.concat([c2,c1], axis=1), color="blue", ax=ax)
# Plot the Lift Chart:
# Regression chart.
def chart_regression(pred,y,sort=True):
t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
if sort:
t.sort_values(by=['y'],inplace=True)
a = plt.plot(t['y'].tolist(),label='expected')
b = plt.plot(t['pred'].tolist(),label='prediction')
plt.ylabel('output')
plt.legend()
plt.show()
pred=grid_search.predict(X_test)
chart_regression(pred.flatten(), np.array(y_test), sort=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
pred_inv=scaler_y.inverse_transform(pd.DataFrame(pred))
y_test_inv=scaler_y.inverse_transform(y_test)
rmse = sqrt(mean_squared_error(np.e**y_test_inv, np.e**pred_inv))
print('Test rmse: ', rmse)
plt.boxplot([y_test_inv.ravel(), pred_inv.ravel()], labels=['actual','predicted'])
plt.title('Box Plot - Actual, Predicted')
plt.ylabel('Target')
plt.grid(True)
# Over-prediction and under-prediction need to be balanced:
# Test data residuals
residuals = y_test_inv.ravel()-pred_inv.ravel()
plt.hist(residuals)
plt.grid(True)
plt.xlabel('(Actual - Predicted)')
plt.ylabel('Count')
plt.title('Residuals Distribution')
plt.axvline(color='g')
plt.show()
im=RandomForestRegressor().fit(X, y).feature_importances_
# Sort & Plot:
d=dict(zip(np.array(X.columns), im))
k=sorted(d,key=lambda i: d[i], reverse= True)
[print((i,d[i])) for i in k]
# Plot:
c1=pd.DataFrame(np.array(im), columns=["Importance"])
c2=pd.DataFrame(np.array(X.columns),columns=["Feature"])
fig, ax = plt.subplots(figsize=(12,6))
sns.barplot(x="Feature", y="Importance", data=pd.concat([c2,c1], axis=1), color="blue", ax=ax)
plt.xticks(rotation=-60)
plt.show()
```
# Kalman Filter
Kalman filters are linear models for state estimation of dynamic systems [1]. They have been the <i>de facto</i> standard in many robotics and tracking/prediction applications because they are well suited for systems with uncertainty about an observable dynamic process. They use an "observe, predict, correct" paradigm to extract information from an otherwise noisy signal. In Pyro, we can build differentiable Kalman filters with learnable parameters using the `pyro.contrib.tracking` [library](http://docs.pyro.ai/en/dev/contrib.tracking.html#module-pyro.contrib.tracking.extended_kalman_filter).
## Dynamic process
To start, consider this simple motion model:
$$ X_{k+1} = FX_k + \mathbf{W}_k $$
$$ \mathbf{Z}_k = HX_k + \mathbf{V}_k $$
where $X_k$ is the latent state (the signal estimate) at timestep $k$, $\mathbf{Z}_k$ is the observed value at timestep $k$, and $\mathbf{W}_k$ and $\mathbf{V}_k$ are independent noise processes (i.e. $\mathbb{E}[w_k v_j^T] = 0$ for all $j, k$) which we'll approximate as Gaussians. Note that the state transitions are linear.
## Kalman Update
At each time step, we perform a prediction for the mean and covariance:
$$ \hat{X}_k = F\hat{X}_{k-1}$$
$$\hat{P}_k = FP_{k-1}F^T + Q$$
and a correction for the measurement:
$$ K_k = \hat{P}_k H^T(H\hat{P}_k H^T + R)^{-1}$$
$$ X_k = \hat{X}_k + K_k(z_k - H\hat{X}_k)$$
$$ P_k = (I-K_k H)\hat{P}_k$$
where $\hat{X}_k$ is the predicted state estimate, $P_k$ is the state covariance matrix, $K_k$ is the Kalman gain, and $Q$ and $R$ are the process and measurement noise covariance matrices, respectively.
For an in-depth derivation, see \[1\]. A minimal numerical sketch of a single predict/correct step is shown below.
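Here is one way a single predict/correct step could look in plain NumPy. This is purely illustrative: the matrices and noise levels are made-up toy values and are not part of the Pyro example that follows.
```
import numpy as np
def kalman_step(x, P, z, F, H, Q, R):
    # Predict
    x_pred = F @ x
    P_pred = F @ P @ F.T + Q
    # Correct
    K = P_pred @ H.T @ np.linalg.inv(H @ P_pred @ H.T + R)  # Kalman gain
    x_new = x_pred + K @ (z - H @ x_pred)
    P_new = (np.eye(len(x)) - K @ H) @ P_pred
    return x_new, P_new
# Toy 1-D example: the state is just a position, and we measure it with noise
F = np.array([[1.0]]); H = np.array([[1.0]])
Q = np.array([[1e-4]]); R = np.array([[1e-2]])
x, P = np.array([0.0]), np.array([[1.0]])
x, P = kalman_step(x, P, z=np.array([0.5]), F=F, H=H, Q=Q, R=R)
```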
## Nonlinear Estimation: Extended Kalman Filter
What if our system is non-linear, eg in GPS navigation? Consider the following non-linear system:
$$ X_{k+1} = \mathbf{f}(X_k) + \mathbf{W}_k $$
$$ \mathbf{Z}_k = \mathbf{h}(X_k) + \mathbf{V}_k $$
Notice that $\mathbf{f}$ and $\mathbf{h}$ are now (smooth) non-linear functions.
The Extended Kalman Filter (EKF) attacks this problem by using a local linearization of the Kalman filter via a [Taylor series expansion](https://en.wikipedia.org/wiki/Taylor_series).
$$ f(X_k, k) \approx f(x_k^R, k) + \mathbf{H}_k(X_k - x_k^R) + \cdots$$
where $\mathbf{H}_k$ is the Jacobian matrix at time $k$, $x_k^R$ is the previous optimal estimate, and we ignore the higher order terms. At each time step, we compute a Jacobian conditioned on the previous predictions (this computation is handled by Pyro under the hood), and use the result to perform a prediction and update; a small sketch of this linearization step is shown below.
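To illustrate what the linearization amounts to, here is a small, hypothetical PyTorch sketch that evaluates the Jacobian of a made-up nonlinear transition at the previous estimate. It assumes a recent PyTorch (with `torch.autograd.functional.jacobian`) and is independent of the Pyro tracking example below, where Pyro computes these Jacobians internally.
```
import torch
from torch.autograd.functional import jacobian
# Made-up nonlinear transition for a state [x, y, theta] (illustrative only)
def f(state):
    x, y, theta = state
    return torch.stack([x + 0.1 * torch.cos(theta),
                        y + 0.1 * torch.sin(theta),
                        theta])
x_prev = torch.tensor([0.0, 0.0, 0.3])  # previous optimal estimate x_k^R
H_f = jacobian(f, x_prev)               # 3x3 Jacobian used in the EKF prediction step
```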
Omitting the derivations, the modified prediction equations are now:
$$ \hat{X}_k \approx \mathbf{f}(X_{k-1}^R)$$
$$ \hat{P}_k = \mathbf{H}_\mathbf{f}(X_{k-1})P_{k-1}\mathbf{H}_\mathbf{f}^T(X_{k-1}) + Q$$
and the updates are now:
$$ X_k \approx \hat{X}_k + K_k\big(z_k - \mathbf{h}(\hat{X}_k)\big)$$
$$ K_k = \hat{P}_k \mathbf{H}_\mathbf{h}^T(\hat{X}_k) \Big(\mathbf{H}_\mathbf{h}(\hat{X}_k)\hat{P}_k \mathbf{H}_\mathbf{h}^T(\hat{X}_k) + R_k\Big)^{-1} $$
$$ P_k = \big(I - K_k \mathbf{H}_\mathbf{h}(\hat{X}_k)\big)\hat{P}_k$$
In Pyro, all we need to do is create an `EKFState` object and use its `predict` and `update` methods. Pyro will do exact inference to compute the innovations and we will use SVI to learn a MAP estimate of the position and measurement covariances.
As an example, let's look at an object moving at near-constant velocity in 2-D in discrete time, over the 10 time steps simulated below.
```
import os
import math
import torch
import pyro
import pyro.distributions as dist
from pyro.infer.autoguide import AutoDelta
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO, config_enumerate
from pyro.contrib.tracking.extended_kalman_filter import EKFState
from pyro.contrib.tracking.distributions import EKFDistribution
from pyro.contrib.tracking.dynamic_models import NcvContinuous
from pyro.contrib.tracking.measurements import PositionMeasurement
smoke_test = ('CI' in os.environ)
assert pyro.__version__.startswith('0.5.1')
pyro.enable_validation(True)
dt = 1e-2
num_frames = 10
dim = 4
# Continuous model
ncv = NcvContinuous(dim, 2.0)
# Truth trajectory
xs_truth = torch.zeros(num_frames, dim)
# initial direction
theta0_truth = 0.0
# initial state
with torch.no_grad():
xs_truth[0, :] = torch.tensor([0.0, 0.0, math.cos(theta0_truth), math.sin(theta0_truth)])
for frame_num in range(1, num_frames):
# sample independent process noise
dx = pyro.sample('process_noise_{}'.format(frame_num), ncv.process_noise_dist(dt))
xs_truth[frame_num, :] = ncv(xs_truth[frame_num-1, :], dt=dt) + dx
```
Next, let's specify the measurements. Notice that we only measure the positions of the particle.
```
# Measurements
measurements = []
mean = torch.zeros(2)
# no correlations
cov = 1e-5 * torch.eye(2)
with torch.no_grad():
# sample independent measurement noise
dzs = pyro.sample('dzs', dist.MultivariateNormal(mean, cov).expand((num_frames,)))
# compute measurement means
zs = xs_truth[:, :2] + dzs
```
We'll use a [Delta autoguide](http://docs.pyro.ai/en/dev/infer.autoguide.html#autodelta) to learn MAP estimates of the position and measurement covariances. The `EKFDistribution` computes the joint log density of all of the EKF states given a tensor of sequential measurements.
```
def model(data):
# a HalfNormal can be used here as well
R = pyro.sample('pv_cov', dist.HalfCauchy(2e-6)) * torch.eye(4)
Q = pyro.sample('measurement_cov', dist.HalfCauchy(1e-6)) * torch.eye(2)
# observe the measurements
pyro.sample('track_{}'.format(i), EKFDistribution(xs_truth[0], R, ncv,
Q, time_steps=num_frames),
obs=data)
guide = AutoDelta(model) # MAP estimation
optim = pyro.optim.Adam({'lr': 2e-2})
svi = SVI(model, guide, optim, loss=Trace_ELBO(retain_graph=True))
pyro.set_rng_seed(0)
pyro.clear_param_store()
for i in range(250 if not smoke_test else 2):
loss = svi.step(zs)
if not i % 10:
print('loss: ', loss)
# retrieve states for visualization
R = guide()['pv_cov'] * torch.eye(4)
Q = guide()['measurement_cov'] * torch.eye(2)
ekf_dist = EKFDistribution(xs_truth[0], R, ncv, Q, time_steps=num_frames)
states= ekf_dist.filter_states(zs)
```
```
# Load packages
import tensorflow as tf
import pandas as pd
from tensorflow import keras
import numpy as np
import pandas as pd
import os
import scipy as scp
import scipy.stats as scps
import time
from datetime import datetime
# Load my own functions
import dnnregressor_train_eval_keras as dnnk
from kde_training_utilities import kde_load_data
from kde_training_utilities import kde_make_train_test_split
import make_data_wfpt as mdw
# Handle some cuda business
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# Make dnnk class (cpm for choice probability model)
cpm = dnnk.dnn_trainer()
# Define folder in which dataset lies
data_folder = '/media/data_cifs/afengler/data/kde/ddm/train_test_data/'
# Make train test split
kde_make_train_test_split(folder = data_folder,
p_train = 0.8)
# Load train test split
cpm.data['train_features'], cpm.data['train_labels'], cpm.data['test_features'], cpm.data['test_labels'] = kde_load_data(folder = data_folder)
cpm.data['test_features'].shape
cpm.data['train_features']
cpm.data['train_features'].iloc[171247010, :]
cpm.data['train_features']['log_l'] = cpm.data['train_labels']
cpm.data['train_features'].sort_values(by = 'log_l')
cpm.data['train_features']
cpm.data['train_features'].iloc[22428, :]
cpm.data['train_labels'][22428, ]
# Make all parameters we can specify explicit
# Model parameters
cpm.model_params
# Parameters governing training
cpm.train_params
# Parameters concerning data storage
cpm.data_params
# If necessary, specify new set of parameters here:
# Model params
cpm.model_params['output_activation'] = 'linear'
cpm.model_params['hidden_layers'] = [20, 40, 60, 80, 100, 120]
cpm.model_params['hidden_activations'] = ['relu', 'relu', 'relu', 'relu', 'relu', 'relu']
cpm.model_params['input_shape'] = cpm.data['train_features'].shape[1]
# cpm.model_params['l1_activation'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# cpm.model_params['l2_activation'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
cpm.model_params['l1_kernel'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
cpm.model_params['l2_kernel'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# Train params
cpm.train_params['batch_size'] = 1000000
cpm.train_params['max_train_epochs'] = 250
cpm.train_params['min_delta'] = 0.00001
# Data params
cpm.data_params['data_type'] = 'kde'
cpm.data_params['data_type_signature'] = '_ddm_linear_collapse_'
cpm.data_params['training_data_size'] = cpm.data['train_features'].shape[0]
cpm.data_params['timestamp'] = datetime.now().strftime('%m_%d_%y_%H_%M_%S')
cpm.data_params['model_directory'] = '/media/data_cifs/afengler/data/kde/linear_collapse/keras_models'
# Make model
cpm.keras_model_generate(save_model = True)
# Train model
cpm.run_training(save_history = True,
warm_start = False)
```
<small><i>This notebook was put together by [Jake Vanderplas](http://www.vanderplas.com). Source and license info is on [GitHub](https://github.com/jakevdp/sklearn_tutorial/).</i></small>
# Validation and Model Selection
In this section, we'll look at *model evaluation* and the tuning of *hyperparameters*, which are parameters that define the model.
```
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
```
## Validating Models
One of the most important pieces of machine learning is **model validation**: that is, checking how well your model fits a given dataset. But there are some pitfalls you need to watch out for.
Consider the digits example we've been looking at previously. How might we check how well our model fits the data?
```
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
```
Let's fit a K-neighbors classifier
```
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X, y)
```
Now we'll use this classifier to *predict* labels for the data
```
y_pred = knn.predict(X)
```
Finally, we can check how well our prediction did:
```
print("{0} / {1} correct".format(np.sum(y == y_pred), len(y)))
```
It seems we have a perfect classifier!
**Question: what's wrong with this?**
## Validation Sets
Above we made the mistake of testing our data on the same set of data that was used for training. **This is not generally a good idea**. If we optimize our estimator this way, we will tend to **over-fit** the data: that is, we learn the noise.
A better way to test a model is to use a hold-out set which doesn't enter the training. We've seen this before using scikit-learn's train/test split utility:
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train.shape, X_test.shape
```
Now we train on the training data, and validate on the test data:
```
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print("{0} / {1} correct".format(np.sum(y_test == y_pred), len(y_test)))
```
This gives us a more reliable estimate of how our model is doing.
The metric we're using here, comparing the number of matches to the total number of samples, is known as the **accuracy score**, and can be computed using the following routine:
```
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
```
This can also be computed directly from the ``model.score`` method:
```
knn.score(X_test, y_test)
```
Using this, we can ask how this changes as we change the model parameters, in this case the number of neighbors:
```
for n_neighbors in [1, 5, 10, 20, 30]:
knn = KNeighborsClassifier(n_neighbors)
knn.fit(X_train, y_train)
print(n_neighbors, knn.score(X_test, y_test))
```
We see that in this case, a small number of neighbors seems to be the best option.
## Cross-Validation
One problem with validation sets is that you "lose" some of the data. Above, we've only used 3/4 of the data for the training, and used 1/4 for the validation. Another option is to use **2-fold cross-validation**, where we split the sample in half and perform the validation twice:
```
X1, X2, y1, y2 = train_test_split(X, y, test_size=0.5, random_state=0)
X1.shape, X2.shape
print(KNeighborsClassifier(1).fit(X2, y2).score(X1, y1))
print(KNeighborsClassifier(1).fit(X1, y1).score(X2, y2))
```
Thus a two-fold cross-validation gives us two estimates of the score for that parameter.
Because this is a bit of a pain to do by hand, scikit-learn has a utility routine to help:
```
from sklearn.model_selection import cross_val_score
cv = cross_val_score(KNeighborsClassifier(1), X, y, cv=2)
cv.mean()
```
### K-fold Cross-Validation
Here we've used 2-fold cross-validation. This is just one specialization of $K$-fold cross-validation, where we split the data into $K$ chunks and perform $K$ fits, where each chunk gets a turn as the validation set.
We can do this by changing the ``cv`` parameter above. Let's do 10-fold cross-validation:
```
cross_val_score(KNeighborsClassifier(1), X, y, cv=10)
```
This gives us an even better idea of how well our model is doing.
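Under the hood, ``cv=10`` amounts to looping over ten train/validation splits ourselves. A rough sketch of that loop is below; note that for classifiers ``cross_val_score`` actually defaults to *stratified* folds, so the exact splits will differ.
```
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
scores = []
for train_idx, val_idx in KFold(n_splits=10, shuffle=True, random_state=0).split(X):
    # fit on nine folds, score on the held-out fold
    model = KNeighborsClassifier(n_neighbors=1).fit(X[train_idx], y[train_idx])
    scores.append(model.score(X[val_idx], y[val_idx]))
np.mean(scores)
```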
## Overfitting, Underfitting and Model Selection
Now that we've gone over the basics of validation, and cross-validation, it's time to go into even more depth regarding model selection.
The issues associated with validation and
cross-validation are some of the most important
aspects of the practice of machine learning. Selecting the optimal model
for your data is vital, and is a piece of the problem that is not often
appreciated by machine learning practitioners.
Of core importance is the following question:
**If our estimator is underperforming, how should we move forward?**
- Use simpler or more complicated model?
- Add more features to each observed data point?
- Add more training samples?
The answer is often counter-intuitive. In particular, **Sometimes using a
more complicated model will give _worse_ results.** Also, **Sometimes adding
training data will not improve your results.** The ability to determine
what steps will improve your model is what separates the successful machine
learning practitioners from the unsuccessful.
### Illustration of the Bias-Variance Tradeoff
For this section, we'll work with a simple 1D regression problem. This will help us to
easily visualize the data and the model, and the results generalize easily to higher-dimensional
datasets. We'll explore a simple **linear regression** problem.
This can be accomplished within scikit-learn with the `sklearn.linear_model` module.
We'll create a simple nonlinear function that we'd like to fit
```
def test_func(x, err=0.5):
y = 10 - 1. / (x + 0.1)
if err > 0:
y = np.random.normal(y, err)
return y
```
Now let's create a realization of this dataset:
```
def make_data(N=40, error=1.0, random_seed=1):
    # randomly sample the data
    np.random.seed(random_seed)
X = np.random.random(N)[:, np.newaxis]
y = test_func(X.ravel(), error)
return X, y
X, y = make_data(40, error=1)
plt.scatter(X.ravel(), y);
```
Now say we want to perform a regression on this data. Let's use the built-in linear regression function to compute a fit:
```
X_test = np.linspace(-0.1, 1.1, 500)[:, None]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
model = LinearRegression()
model.fit(X, y)
y_test = model.predict(X_test)
plt.scatter(X.ravel(), y)
plt.plot(X_test.ravel(), y_test)
plt.title("mean squared error: {0:.3g}".format(mean_squared_error(model.predict(X), y)));
```
We have fit a straight line to the data, but clearly this model is not a good choice. We say that this model is **biased**, or that it **under-fits** the data.
Let's try to improve this by creating a more complicated model. We can do this by adding degrees of freedom, and computing a polynomial regression over the inputs. Scikit-learn makes this easy with the ``PolynomialFeatures`` preprocessor, which can be pipelined with a linear regression.
Let's make a convenience routine to do this:
```
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
```
Now we'll use this to fit a quadratic curve to the data.
```
model = PolynomialRegression(2)
model.fit(X, y)
y_test = model.predict(X_test)
plt.scatter(X.ravel(), y)
plt.plot(X_test.ravel(), y_test)
plt.title("mean squared error: {0:.3g}".format(mean_squared_error(model.predict(X), y)));
```
This reduces the mean squared error, and makes a much better fit. What happens if we use an even higher-degree polynomial?
```
model = PolynomialRegression(30)
model.fit(X, y)
y_test = model.predict(X_test)
plt.scatter(X.ravel(), y)
plt.plot(X_test.ravel(), y_test)
plt.title("mean squared error: {0:.3g}".format(mean_squared_error(model.predict(X), y)))
plt.ylim(-4, 14);
```
When we increase the degree to this extent, it's clear that the resulting fit is no longer reflecting the true underlying distribution, but is more sensitive to the noise in the training data. For this reason, we call it a **high-variance model**, and we say that it **over-fits** the data.
Just for fun, let's use IPython's interact capability (provided by the `ipywidgets` package) to explore this interactively:
```
from ipywidgets import interact
def plot_fit(degree=1, Npts=50):
X, y = make_data(Npts, error=1)
X_test = np.linspace(-0.1, 1.1, 500)[:, None]
model = PolynomialRegression(degree=degree)
model.fit(X, y)
y_test = model.predict(X_test)
plt.scatter(X.ravel(), y)
plt.plot(X_test.ravel(), y_test)
plt.ylim(-4, 14)
plt.title("mean squared error: {0:.2f}".format(mean_squared_error(model.predict(X), y)))
interact(plot_fit, degree=(1, 30), Npts=(2, 100));
```
### Detecting Over-fitting with Validation Curves
Clearly, computing the error on the training data is not enough (we saw this previously). As above, we can use **cross-validation** to get a better handle on how the model fit is working.
Let's do this here, again using the ``validation_curve`` utility. To make things more clear, we'll use a slightly larger dataset:
```
X, y = make_data(120, error=1.0)
plt.scatter(X, y);
from sklearn.model_selection import validation_curve
def rms_error(model, X, y):
y_pred = model.predict(X)
return np.sqrt(np.mean((y - y_pred) ** 2))
degree = np.arange(0, 18)
val_train, val_test = validation_curve(PolynomialRegression(), X, y,
                                       param_name='polynomialfeatures__degree',
                                       param_range=degree, cv=7,
                                       scoring=rms_error)
```
Now let's plot the validation curves:
```
def plot_with_err(x, data, **kwargs):
mu, std = data.mean(1), data.std(1)
lines = plt.plot(x, mu, '-', **kwargs)
plt.fill_between(x, mu - std, mu + std, edgecolor='none',
facecolor=lines[0].get_color(), alpha=0.2)
plot_with_err(degree, val_train, label='training scores')
plot_with_err(degree, val_test, label='validation scores')
plt.xlabel('degree'); plt.ylabel('rms error')
plt.legend();
```
Notice the trend here, which is common for this type of plot.
1. For a small model complexity, the training error and validation error are very similar. This indicates that the model is **under-fitting** the data: it doesn't have enough complexity to represent the data. Another way of putting it is that this is a **high-bias** model.
2. As the model complexity grows, the training and validation scores diverge. This indicates that the model is **over-fitting** the data: it has so much flexibility, that it fits the noise rather than the underlying trend. Another way of putting it is that this is a **high-variance** model.
3. Note that the training score (nearly) always improves with model complexity. This is because a more complicated model can fit the noise better, so the model improves. The validation data generally has a sweet spot, which here is around 5 terms.
Here's our best-fit model according to the cross-validation:
```
model = PolynomialRegression(4).fit(X, y)
plt.scatter(X, y)
plt.plot(X_test, model.predict(X_test));
```
### Detecting Data Sufficiency with Learning Curves
As you might guess, the exact turning-point of the tradeoff between bias and variance is highly dependent on the number of training points used. Here we'll illustrate the use of *learning curves*, which display this property.
The idea is to plot the mean-squared-error for the training and test set as a function of *Number of Training Points*
```
from sklearn.model_selection import learning_curve
def plot_learning_curve(degree=3):
train_sizes = np.linspace(0.05, 1, 120)
    N_train, val_train, val_test = learning_curve(PolynomialRegression(degree),
                                                  X, y, train_sizes=train_sizes, cv=5,
                                                  scoring=rms_error)
plot_with_err(N_train, val_train, label='training scores')
plot_with_err(N_train, val_test, label='validation scores')
plt.xlabel('Training Set Size'); plt.ylabel('rms error')
plt.ylim(0, 3)
plt.xlim(5, 80)
plt.legend()
```
Let's see what the learning curves look like for a linear model:
```
plot_learning_curve(1)
```
This shows a typical learning curve: for very few training points, there is a large separation between the training and test error, which indicates **over-fitting**. Given the same model, for a large number of training points, the training and testing errors converge, which indicates potential **under-fitting**.
As you add more data points, the training error will never decrease, and the testing error will never increase (why do you think this is?)
It is easy to see that, in this plot, if you'd like to reduce the MSE down to the nominal value of 1.0 (which is the magnitude of the scatter we put in when constructing the data), then adding more samples will *never* get you there. For $d=1$, the two curves have converged and cannot move lower. What about for a larger value of $d$?
```
plot_learning_curve(3)
```
Here we see that by adding more model complexity, we've managed to lower the level of convergence to an rms error of 1.0!
What if we get even more complex?
```
plot_learning_curve(10)
```
For an even more complex model, we still converge, but the convergence only happens for *large* amounts of training data.
So we see the following:
- you can **cause the lines to converge** by adding more points or by simplifying the model.
- you can **bring the convergence error down** only by increasing the complexity of the model.
Thus these curves can give you hints about how you might improve a sub-optimal model. If the curves are already close together, you need more model complexity. If the curves are far apart, you might also improve the model by adding more data.
To make this more concrete, imagine some telescope data in which the results are not robust enough. You must think about whether to spend your valuable telescope time observing *more objects* to get a larger training set, or *more attributes of each object* in order to improve the model. The answer to this question has real consequences, and can be addressed using these metrics.
## Summary
We've gone over several useful tools for model validation
- The **Training Score** shows how well a model fits the data it was trained on. This is not a good indication of model effectiveness
- The **Validation Score** shows how well a model fits hold-out data. The most effective method is some form of cross-validation, where multiple hold-out sets are used.
- **Validation Curves** are a plot of validation score and training score as a function of **model complexity**:
+ when the two curves are close, it indicates *underfitting*
+ when the two curves are separated, it indicates *overfitting*
+ the "sweet spot" is in the middle
- **Learning Curves** are a plot of the validation score and training score as a function of **Number of training samples**
+ when the curves are close, it indicates *underfitting*, and adding more data will not generally improve the estimator.
+ when the curves are far apart, it indicates *overfitting*, and adding more data may increase the effectiveness of the model.
These tools are powerful means of evaluating your model on your data.
# Data Analysis -- Measuring Coronavirus Response Consistency -- KWIC Analysis
## This notebook creates KWICs centered around the word 'coronavirus', with 10 words before and after it. I explain the purpose of these KWICs, which I called 'context chunks' in my blog post, below.
```
%run data_processing.ipynb
```
### KWICs of Fox vs. CNN usage of the word 'coronavirus'
### Key word in context (KWIC) analysis might give a sense of the consistency of the messaging surrounding the word 'coronavirus' between Fox's and CNN's news broadcasts. I analyzed a random sample of 10 KWICs for each news outlet in both February and April, where each KWIC centers around the word 'coronavirus' and contains the 10 words before and the 10 words after it. By analyzing February and April KWICs for each news outlet, as well as comparing February against April, I can assess how consistent each news outlet's coronavirus response/messaging is within the same month as well as over time. I hypothesized that Fox's messaging/response would be inconsistent both within February and April and over time, in line with the more inconsistent sentiment revealed through the sentiment analysis and VAD analysis above. I also hypothesized that CNN's messaging/response would be more consistent both within February and April and over time, which would be consistent with what CNN's sentiment appears to have revealed from the sentiment and VAD analysis above. If my hypotheses are correct, this would provide additional evidence to support the conclusion that Fox and CNN differ in the consistency of their coronavirus responses, with evidence from both messaging and sentiment.
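Since `make_kwic` and `print_kwic` are defined in `data_processing.ipynb` and not shown here, the sketch below is only a hypothetical illustration of what a KWIC extractor along these lines might look like; the actual helper may differ in its details.
```
# Hypothetical sketch of a KWIC extractor (the real make_kwic lives in data_processing.ipynb)
def make_kwic_sketch(keyword, tokens, window=10):
    kwics = []
    for i, tok in enumerate(tokens):
        if tok == keyword:
            before = tokens[max(0, i - window):i]   # up to `window` tokens before the keyword
            after = tokens[i + 1:i + 1 + window]    # up to `window` tokens after the keyword
            kwics.append((before, tok, after))
    return kwics
```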
```
#Fox Data Split By Date
feb_fox=data_fox[(data_fox['full_date']>='2020-02-01') & (data_fox['full_date']<='2020-02-29')]
march_fox=data_fox[(data_fox['full_date']>='2020-03-01') & (data_fox['full_date']<='2020-03-31')]
april_fox=data_fox[(data_fox['full_date']>='2020-04-01') & (data_fox['full_date']<='2020-04-30')]
#CNN Data Split By Date
feb_cnn=data_cnn[(data_cnn['full_date']>='2020-02-01') & (data_cnn['full_date']<='2020-02-29')]
march_cnn=data_cnn[(data_cnn['full_date']>='2020-03-01') & (data_cnn['full_date']<='2020-03-31')]
april_cnn=data_cnn[(data_cnn['full_date']>='2020-04-01') & (data_cnn['full_date']<='2020-04-30')]
full_toks_fox_feb=[]
for i in feb_fox.index:
toks=tokenize(feb_fox['targeted text'][i],True,strip_chars=strip_chars)
for t in toks:
full_toks_fox_feb.append(t)
full_toks_fox_march=[]
for i in march_fox.index:
toks=tokenize(march_fox['targeted text'][i],True,strip_chars=strip_chars)
for t in toks:
full_toks_fox_march.append(t)
full_toks_fox_april=[]
for i in april_fox.index:
toks=tokenize(april_fox['targeted text'][i],True,strip_chars=strip_chars)
for t in toks:
full_toks_fox_april.append(t)
full_toks_cnn_feb=[]
for i in feb_cnn.index:
toks=tokenize(feb_cnn['targeted text'][i],True,strip_chars=strip_chars)
for t in toks:
full_toks_cnn_feb.append(t)
full_toks_cnn_march=[]
for i in march_cnn.index:
toks=tokenize(march_cnn['targeted text'][i],True,strip_chars=strip_chars)
for t in toks:
full_toks_cnn_march.append(t)
full_toks_cnn_april=[]
for i in april_cnn.index:
toks=tokenize(april_cnn['targeted text'][i],True,strip_chars=strip_chars)
for t in toks:
full_toks_cnn_april.append(t)
#Fox Coronavirus KWIC
kwic_fox_coronavirus_feb=make_kwic('coronavirus',full_toks_fox_feb,10)
kwic_fox_coronavirus_march=make_kwic('coronavirus',full_toks_fox_march,10)
kwic_fox_coronavirus_april=make_kwic('coronavirus',full_toks_fox_april,10)
#CNN Coronavirus KWIC
kwic_cnn_coronavirus_feb=make_kwic('coronavirus',full_toks_cnn_feb,10)
kwic_cnn_coronavirus_march=make_kwic('coronavirus',full_toks_cnn_march,10)
kwic_cnn_coronavirus_april=make_kwic('coronavirus',full_toks_cnn_april,10)
import random
random.seed(30)
kwic_fox_coronavirus_sample_feb=random.sample(kwic_fox_coronavirus_feb,10)
kwic_fox_coronavirus_sample_march=random.sample(kwic_fox_coronavirus_march,10)
kwic_fox_coronavirus_sample_april=random.sample(kwic_fox_coronavirus_april,10)
kwic_cnn_coronavirus_sample_feb=random.sample(kwic_cnn_coronavirus_feb,10)
kwic_cnn_coronavirus_sample_march=random.sample(kwic_cnn_coronavirus_march,10)
kwic_cnn_coronavirus_sample_april=random.sample(kwic_cnn_coronavirus_april,10)
```
### Fox February KWICs -- Random Sample of 10
```
print("Fox KWIC Coronavirus February Broadcasts"+"\n")
print_kwic(kwic_fox_coronavirus_sample_feb)
```
### In February, Fox's messaging surrounding the coronavirus appears to have been all over the place. At times they characterized coronavirus as "deadly," and covered the number of cases, deaths, and fatality rates closely. However, at other times they downplayed the coronavirus by comparing it to the flu and telling Americans to keep that in mind. For example, in one of the randomly sampled February KWICs above, Fox said "flu in florida they haven't had a single diagnosis of coronavirus so it's important for americans to keep things in perspective folks." Fox's messaging surrounding the coronavirus is inconsistent within the month of February, as they both tell viewers that coronavirus is deadly and "appears to kill about two percent of the people," while also downplaying the virus by comparing it to the flu and telling viewers to keep that in mind.
### Fox April KWICs -- Random Sample of 10
```
print("Fox KWIC Coronavirus April Broadcasts"+"\n")
print_kwic(kwic_fox_coronavirus_sample_april)
```
### In April, Fox's messaging surrounding the coronavirus also appears to be inconsistent. They appear to have maintained the same messaging tactic and are all over the place. At times they appear to take the virus seriously, mentioning that "if we did nothing about the coronavirus we could have had as many as three million" deaths. However, at other times they continue to compare coronavirus to the flu, downplaying the pandemic and the coronavirus. For example, in one of the KWICs above, Fox mentions that "the flu is you know way worse than the coronavirus." Within the month of April, Fox's messaging remains inconsistent. So overall, over time, Fox's messaging in response to the coronavirus has been consistently inconsistent. This is consistent with the inconsistency over time that Fox's news broadcasts display in sentiment, valence, arousal, and dominance. Overall, this evidence suggests that Fox's response and messaging around the coronavirus has been rather inconsistent over time.
### CNN February KWICs -- Random Sample of 10
```
print("CNN KWIC Coronavirus February"+"\n")
print_kwic(kwic_cnn_coronavirus_sample_feb)
```
### In February, CNN's messaging surrounding the coronavirus does not appear to have the same inconsistencies that Fox's February messaging appears to have displayed. From the random sample of KWICs displayed above, CNN appears to have focused its messaging around the spread of the coronavirus, the results of coronavirus testing, and the economic impact of the coronavirus' spread across the globe. They appear to only be taking the virus seriously, and use words including "risk," and phrases including "serious concerns." They do not downplay the coronavirus at the same time. CNN's messaging appears to be consistent within the month of February.
### CNN April KWICs -- Random Sample of 10
```
print("CNN KWIC Coronavirus April"+"\n")
print_kwic(kwic_cnn_coronavirus_sample_april)
```
### In April, CNN's messaging surrounding the coronavirus also does not appear to have the same inconsistencies that Fox's April messaging surrounding the coronavirus appears to have displayed. From the random sample of KWICs shown above, within the month of April, CNN appears to have focused its messaging on criticizing the president's response to the coronavirus, talking about the impact of the coronavirus on the globe's geopolitical landscape and other countries, talking about the coronavirus' continued spread, and talking about the deaths the coronavirus has caused. Just like in February, in April CNN appears to have only taken the virus seriously, using phrases including "very grim." CNN does not downplay the coronavirus at the same time. CNN's messaging appears to be consistent within the month of April. So overall, over time, CNN's messaging in response to the coronavirus has been consistent: it is consistent both within given months and between months. This is consistent with the higher consistency over time that CNN's news broadcasts display in sentiment, valence, arousal, and dominance, as previously shown.
### Overall, this evidence suggests that CNN's response and messaging around the coronavirus has been rather consistent over time. This contrasts with Fox's response and messaging surrounding the coronavirus, which has been inconsistent over time. This conclusion comes from the evidence and findings revealed by sentiment analysis and KWIC analysis.
<img src="images/usm.jpg" width="480" height="240" align="left"/>
# MAT281 - Lab N°01
## Class objectives
* Reinforce basic Python concepts.
## Contents
* [Problem 01](#p1)
* [Problem 02](#p2)
* [Problem 03](#p3)
* [Problem 04](#p4)
<a id='p1'></a>
## Problem 01
### a) Computing the number $\pi$
In the 17th and 18th centuries, James Gregory and Gottfried Leibniz discovered an infinite series that can be used to compute $\pi$:
$$\pi = 4 \sum_{k=1}^{\infty}\dfrac{(-1)^{k+1}}{2k-1} = 4(1-\dfrac{1}{3}+\dfrac{1}{5}-\dfrac{1}{7} + ...) $$
Write a program to estimate the value of $\pi$ using the Leibniz method, where the program's input must be an integer $n$ indicating how many terms of the sum to use.
* **Example**: *calcular_pi(3)* = 3.466666666666667, *calcular_pi(1000)* = 3.140592653839794
```
def calcular_pi(n:int)->float:
    """
    calcular_pi(n)
    Approximate the value of pi using the Leibniz method.
    Parameters
    ----------
    n : int
        Number of terms.
    Returns
    -------
    output : float
        Approximate value of pi.
    Examples
    --------
    >>> calcular_pi(3)
    3.466666666666667
    >>> calcular_pi(1000)
    3.140592653839794
    """
    pi = 0 # initial value
    for k in range(1,n+1):
        numerador = (-1)**(k+1) # numerator of the k-th iteration
        denominador = 2*k-1 # denominator of the k-th iteration
        pi+=numerador/denominador # sum up to the k-th term
    return 4*pi
# Access the documentation
help(calcular_pi)
# example 01
calcular_pi(3)
# example 02
calcular_pi(1000)
```
**Remark**:
* Note that if you run the command `calcular_pi(3.0)` it will raise an error ... why?
* In the labs, you are not required to be this meticulous with the documentation.
* First define the code, run the examples, and then document it properly.
### b) Computing the number $e$
Euler made several contributions related to $e$, but it was not until 1748, when he published his **Introductio in analysin infinitorum**, that he gave a definitive treatment of the ideas about $e$. There he showed that:
$$e = \sum_{k=0}^{\infty}\dfrac{1}{k!} = 1+\dfrac{1}{1!}+\dfrac{1}{2!}+\dfrac{1}{3!} + ... $$
Write a program to estimate the value of $e$ using Euler's method, where the program's input must be an integer $n$ indicating how many terms of the sum to use.
* **Example**: *calcular_e(3)* = 2.5, *calcular_e(1000)* = 2.7182818284590455
```
def factorial(n:int)->int:
    """
    factorial(n)
    Helper function to compute the factorial of a number
    Parameters
    ----------
    n : int
        Number whose factorial is computed
    Returns
    -------
    output : int
        Factorial of the number.
    Examples
    --------
    >>> factorial(4)
    24
    """
    if n == 0:
        valor = 1
    else:
        valor = n*factorial(n-1)
    return valor
# example 03
factorial(4)
def calcular_e(n:int)->float:
    """
    calcular_e(n)
    Approximation of e
    Parameters
    ----------
    n : int
        Number of terms
    Returns
    -------
    output : float
        Approximate value of e.
    Examples
    --------
    >>> calcular_e(3)
    2.5
    >>> calcular_e(1000)
    2.7182818284590455
    """
    e = 0 # initial value
    for k in range(1,n):
        denominador = factorial(k) # denominator of the k-th iteration
        e += 1/denominador # add the k-th term
    return e+1
# example 04
calcular_e(3)
# example 05
calcular_e(1000)
```
<a id='p2'></a>
## Problem 02
Let $\sigma(n)$ be the sum of the proper divisors of $n$ (the numbers smaller than $n$ that divide $n$).
[Amicable numbers](https://en.wikipedia.org/wiki/Amicable_numbers) are positive integers $n_1$ and $n_2$ such that the sum of the proper divisors of one equals the other number and vice versa, that is, $\sigma(n_1)=n_2$ and $\sigma(n_2)=n_1$.
For example, 220 and 284 are amicable numbers:
* the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore $\sigma(220) = 284$.
* the proper divisors of 284 are 1, 2, 4, 71 and 142; hence $\sigma(284) = 220$.
Implement a function called `amigos` whose input is two natural numbers $n_1$ and $n_2$ and whose output indicates whether the numbers are amicable or not.
* **Example**: *amigos(220,284)* = True, *amigos(6,5)* = False
```
def suma_divisores(n:int)->int:
    """
    suma_divisores(n)
    Sums the proper divisors of a positive natural number
    Parameters
    ----------
    n : int
        Number whose divisors are summed
    Returns
    -------
    output : int
        Sum of the proper divisors.
    Examples
    --------
    >>> suma_divisores(220)
    284
    >>> suma_divisores(284)
    220
    """
    lista = []
    for i in range(1,n):
        if n%i == 0: # check whether i divides n
            lista.append(i) # add the value i to the list
    return sum(lista)
# example 07
suma_divisores(220)
# example 08
suma_divisores(284)
def amigos(n1:int,n2:int)->bool:
    """
    amigos(n1,n2)
    Checks whether two numbers are amicable under the definition given in the statement
    Parameters
    ----------
    n1 : int
        First number.
    n2 : int
        Second number
    Returns
    -------
    output : bool
        Returns True or False depending on whether the numbers are amicable or not.
    Examples
    --------
    >>> amigos(220,284)
    True
    >>> amigos(6,5)
    False
    """
    s1 = suma_divisores(n1) # variables just for readability
    s2 = suma_divisores(n2)
    if s1 == n2 and s2 == n1: # the sum of proper divisors of each number must equal the other number
        return True
    else:
        return False
# example 09
amigos(220,284)
# example 10
amigos(6,5)
```
<a id='p3'></a>
## Problem 03
The [Collatz conjecture](https://en.wikipedia.org/wiki/Collatz_conjecture), also known as the $3n+1$ conjecture or Ulam's conjecture (among other names), was stated by the mathematician Lothar Collatz in 1937 and remains unsolved to this day.
Consider the following operation, applicable to any positive integer:
* If the number is even, divide it by 2.
* If the number is odd, multiply it by 3 and add 1.
The conjecture states that, no matter which number we start from, we always eventually reach 1 (and therefore the cycle 4, 2, 1).
Implement a function called `collatz` whose input is a positive natural number $N$ and whose output is the sequence of numbers until reaching 1.
* **Example**: *collatz(9)* = [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
```
def collatz(n:int)->list:
    """
    collatz(n)
    Computes the Collatz sequence of a number n until it reaches 1
    Parameters
    ----------
    n : int
        Positive natural number.
    Returns
    -------
    output : list
        List with the numbers produced by the Collatz algorithm.
    Examples
    --------
    >>> collatz(9)
    [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
    """
    lista = [n] # create the list with the input value
    while n != 1: # iterate on the last value of n added to the list
        if n%2 == 0:
            lista.append(int(n/2)) # if the number is even, apply the even rule
        else:
            lista.append(int(3*n +1)) # otherwise apply the odd rule
        n = lista[-1] # redefine n as the last value added to the list for the next iteration
    return lista
# example 11
collatz(9)
```
<a id='p4'></a>
## Problem 04
[Goldbach's conjecture](https://en.wikipedia.org/wiki/Goldbach%27s_conjecture) is one of the oldest open problems in mathematics. In 1921, in his famous address to the Mathematical Society of Copenhagen, G.H. Hardy remarked that Goldbach's conjecture is probably not only one of the most difficult unsolved problems in number theory, but in all of mathematics. Its statement is the following:
$$\textrm{Every even number greater than 2 can be written as the sum of two prime numbers - Christian Goldbach (1742)}$$
Implement a function called `goldbach` whose input is a positive natural number $N$ and whose output is two primes ($N1$ and $N2$) such that $N1+N2=N$.
* **Example**: goldbach(4) = (2,2), goldbach(6) = (3,3), goldbach(8) = (3,5)
```
import math as m # library to compute the square root (numpy and ** did not work for me here)
def esPrimo(num:int)->bool:
    """
    esPrimo(num)
    Checks whether a number is prime using a simple primality test
    Parameters
    ----------
    num : int
        Number to test for primality.
    Returns
    -------
    output : bool
        Returns True or False depending on whether the number is prime or not.
    Examples
    --------
    >>> esPrimo(7)
    True
    >>> esPrimo(4)
    False
    """
    if num%2==0 and num!=2: # discard even numbers
        return False
    for i in range(3,int(m.sqrt(num))+1,2): # only odd candidates up to the square root need to be tested
        if num%i==0: # check divisibility
            return False
    return True
# example
esPrimo(7)
# example
esPrimo(20)
def listaPrimos(limite:int)->list:
    """
    listaPrimos(limite)
    Generates a list of prime numbers up to a limit value
    Parameters
    ----------
    limite : int
        Value up to which the list is computed.
    Returns
    -------
    output : list
        List of prime numbers up to the limit value.
    Examples
    --------
    >>> listaPrimos(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    lista=[2] # the smallest prime is 2
    for j in range(3,limite+1,2): # step by 2 to skip even numbers
        if esPrimo(j): # if the number is prime, add it to the list
            lista.append(j)
    return lista
listaPrimos(20)
def goldbach(n:int)->tuple:
    """
    goldbach(n)
    Finds two primes whose sum equals n (Goldbach decomposition)
    Parameters
    ----------
    n : int
        Number to which the conjecture is applied.
    Returns
    -------
    output : tuple
        Tuple with two primes whose sum equals n.
    Examples
    --------
    >>> goldbach(4)
    (2,2)
    """
    lista = listaPrimos(n) # create a list with the primes up to n
    for i in lista:
        for k in lista: # iterate over the list with two indices to compare
            suma = i+k
            if n == suma: # check whether the sum of both primes equals the input
                tupla = (i,k) # if so, build the tuple that is returned as the result
                return tupla
# example 12
goldbach(4)
# example 13
goldbach(6)
# example 14
goldbach(8)
```
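As a quick sanity check of `goldbach` (not part of the original statement), we can confirm that the returned pair sums back to the input for every even number in a small range:
```
# verify the decomposition for all even numbers between 4 and 100
all(sum(goldbach(n)) == n for n in range(4, 101, 2))
```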
```
# This cell is used to load the hints and solutions, if needed
from IPython.display import Pretty as disp
hint = 'https://raw.githubusercontent.com/soltaniehha/Business-Analytics/master/docs/hints/' # path to hints on GitHub
```
# Exercise (continued) - Working with Pandas and Seaborn
Now it's your turn to practice what we learned in the class.
In this notebook we will play with some of the concepts we just learned, such as handling missing values, grouping, and aggregation. You have seen this dataset in the previous class, so the first part of the notebook doesn't require you to write any code.
We are working with the following dataset: `data/2017_StPaul_MN_Real_Estate.csv`
Let's call this DataFrame `houses`. We can import the data using a URL if the data file is located on the internet. This is a very convenient option since our file is located in my GitHub; however, this is not always the case, and in real life the data is often not on a public URL. We will use the URL syntax as much as possible to avoid any local path issues:
```
import pandas as pd
url = 'https://raw.githubusercontent.com/soltaniehha/Business-Analytics/master/data/2017_StPaul_MN_Real_Estate.csv'
houses = pd.read_csv(url)
print("There are {} rows and {} columns.".format(houses.shape[0], houses.shape[1]))
```
This dataset has too many columns to study. To start, let's create a new dataset with a smaller number of attributes. To do this, use the following list, `subset_columns`:
```
subset_columns = ['streetaddress','STREETNAME', 'PostalCode', 'StateOrProvince', 'City', 'SchoolDistrictNumber',
'SalesClosePrice', 'LISTDATE', 'offmarketdate', 'LISTPRICE', 'LISTTYPE',
'OriginalListPrice', 'PricePerTSFT', 'DAYSONMARKET', 'ROOF',
'SQFTABOVEGROUND', 'RoomArea1', 'YEARBUILT']
df = houses[subset_columns].copy() # This will create an individual copy of the original DataFrame
# Adding a new column, sales_vs_list
df['sales_vs_list'] = (df['SalesClosePrice'] - df['LISTPRICE'])/df['LISTPRICE'] * 100
print("There are {} rows and {} columns.".format(df.shape[0], df.shape[1]))
df.head()
```
Use `describe()` to get a high level summary of the data:
```
df.describe()
```
Using `.info()`, extract more information regarding the missing values and column types:
```
df.info()
```
From the outcome of `info()` we can see that columns *ROOF* and *RoomArea1* have some null values. We can first visually inspect the rows where for instance *ROOF* is missing and see if we find any common cause:
```
df[df['ROOF'].isnull()].head()
```
# Your Turn
Let's find out what possible values *ROOF* can take. We can do this by applying the `.unique()` function to the column of interest:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_01')
```
This is great, but wouldn't it be even cooler to see how popular these roofs are? Let's use a `groupby()` on our `df` DataFrame and count how many times each roof type was used in the dataset. The easy way to do this is to use the function `size()`, which will give you the number of elements in each group:
Hint: this will print a long list; you can use `.head(10)` to limit it.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_02')
```
Now we will sort it to get the most common ones on top. Use `.sort_values()` right after your aggregation function and before `head()`; if you sort after `head()`, it will only sort among the limited rows that were printed, which is not what we are looking for here:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_03')
```
Let's find out how many values are missing from *ROOF* (even though we can also find this out from `info()`).
1. Subset the column of interest with the following format: `DF['col1']`. This will give us a Series object.
2. Chain the `isnull()` function to this Series, i.e., `DF['col1'].isnull()`
3. Chain the `sum()` function to the previous step
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_04')
```
Let's replace the null values with the most common value in the dataset, "Asphalt Shingles". **Note:** This may or may not be the right thing to do depending on the problem you are solving.
To do so we can use the `.fillna()` function, chained to the column subset `df['ROOF']`. In its arguments, first pass the replacement value, here "Asphalt Shingles", and then `inplace=True`. If we don't set `inplace=True`, the nulls won't be permanently filled in our DataFrame.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_05')
```
Check if there are any nulls left in that column (similar to the cell above):
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_06')
```
Check out the `info()`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_07')
```
We can see that the only column with nulls is `RoomArea1`. For the sake of practice, let's handle it differently this time. We will drop any records that don't have a value for this column. We can do this using the `dropna()` function.
Do NOT use the option `inplace=True`, instead save the output into a new DataFrame called `df2`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_08')
```
Let's check out the `info()` on `df2`:
```
df2.info()
```
`dropna()` removes every record that contains any nulls. For other functionality, please check out the help function or go back to 02-Aggregation-and-Grouping.ipynb.
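As a quick illustration on a made-up toy frame (not part of the exercise), `subset` and `thresh` give finer control over which rows `dropna()` removes:
```
# hypothetical toy data, just to illustrate the dropna() options
toy = pd.DataFrame({'a': [1, None, 3], 'b': [None, None, 6], 'c': [7, 8, 9]})
print(toy.dropna(subset=['b']))  # drop rows only where column 'b' is null
print(toy.dropna(thresh=2))      # keep rows with at least 2 non-null values
```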
We could continue with `df2`, but we weren't really planning on using `RoomArea1` for this analysis. In order to have a clean dataset, let's just remove that column. The advantage is that we get to keep all 5,000 data points.
To do so we will use `drop()` function.
1. Pass the name of the column to drop
2. Set parameter `axis=1`. This will indicate that the name that we passed is a column name and not a row name (index)
3. `inplace=True`
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_09')
```
Check out the `info()`:
```
df.info()
```
Okay! Now that we don't have any missing values in our DataFrame let's continue with some aggregation tasks.
Group our `df` by "City" and calculate min, max, and count for every group and every column.
Hint: use function `agg()` and pass a list of aggregation function you need.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_10')
```
Too many values are being printed and it's really hard to read. Let's limit this by asking to show `SalesClosePrice` and `SQFTABOVEGROUND` only.
Hint: you can do this by passing `[['SalesClosePrice', 'SQFTABOVEGROUND']]` to groupby.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_11')
```
Use `describe()` and `groupby()` to get a high level summary of "LISTPRICE" for each "City".
1. Apply groupby and use the column you want to group with
2. Pass the name of the column you'd like `describe()` to describe!
3. Chain `describe()`
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_12')
import seaborn as sns
import matplotlib.pyplot as plt
sns.violinplot(x="City", y="SalesClosePrice", data=df, inner="quartile",
scale='count')
plt.xticks(rotation=90)
plt.title("Distribution of Closing Price for Different Cities")
sns.despine()
```
This time it's your turn. Use the `boxplot()` function to plot box plots of `sales_vs_list` for each city:
Hint: the code is similar to the one above, without setting the `inner` and `scale` parameters.
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_13')
```
Notice that we have modified the "inner" option of the plot above. It accepts the following values: {"box", "quartile", "point", "stick", None}. Try them to see the difference.
We also have changed the scale method, the method used to scale the width of each violin. These are the possible options: {"area", "count", "width"}. If ``area``, each violin will have the same area. If ``count``, the width of the violins will be scaled by the number of observations in that bin. If ``width``, each violin will have the same width.
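For example, a quick variation of the same plot (only the violin options change) looks like this:
```
# same data as above, but with box-style inner markings and equal-width violins
sns.violinplot(x="City", y="SalesClosePrice", data=df, inner="box", scale="width")
plt.xticks(rotation=90)
sns.despine()
```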
Now let's use the `agg()` function to find the average `SalesClosePrice` and the `count` for each `PostalCode`:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_14')
```
Let's use the `filter()` function to keep only the houses in `PostalCode`s whose average `SalesClosePrice` is less than 250,000. Save the result to a new DataFrame and call it `df_inexpensive_zips`. Run the aggregate again:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_15')
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_16')
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_17')
```
Use the `transform()` function on `df_inexpensive_zips` to create a new column called `SalesPriceNormalized` that shows the proportional value of a sold house to the most expensive house sold within the same zipcode:
```
# Your answer goes here
# SOLUTION: Uncomment and execute the cell below to get help
#disp(hint + '05_01_18')
```
# Single GNSS position from multiple points
**Script prepared by A. Rovere - MARUM, University of Bremen**
This script uses a Monte Carlo approach to calculate the average position (with positioning uncertainties) given a series of GNSS points collected at the same location. It can be used, for example, when several processing options are available for a base station point.
```
import geopandas as gpd
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from math import pi
```
## Import csv
Import the CSV file containing the different points. See example file for the formatting. Coordinate system for the import file should be EPSG 4326.
```
df = pd.read_csv('Example_data.csv')
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df['Longitude (dec degrees)'], df['Latitude (dec degrees)'])) # points_from_xy expects (x=longitude, y=latitude)
gdf.crs = 'epsg:4326'
gdf = gdf.to_crs('epsg:3857')
gdf['X (m)']=gdf.geometry.x
gdf['Y (m)']=gdf.geometry.y
gdf
```
## Monte Carlo process
One line from the dataframe above is selected randomly; then a latitude, longitude, and elevation are sampled from normal distributions defined by that point's uncertainties. This process is repeated 10,000 times.
```
lat=[]
lon=[]
elev=[]
val = np.arange(10000)  # 10,000 Monte Carlo iterations
# Randomly sample the GNSS data points
for x in val:
    # Select a random row
    rnd = gdf.sample(n=1)
    # Sample lat, lon and elevation from normal distributions defined by each point's 2-sigma uncertainties
    lat.append(np.random.normal(rnd['Y (m)'], rnd['Latitude 2-sigma (m)']/2, 1))
    lon.append(np.random.normal(rnd['X (m)'], rnd['Longitude 2-sigma (m)']/2, 1))
    elev.append(np.random.normal(rnd['Height above ellipsoid (m)'], rnd['Elevation 2-sigma (m)']/2, 1));
#Create the dataframe
rand_coord = pd.DataFrame({'Latitude (EPSG 3857, m)':lat, 'Longitude (EPSG 3857, m)':lon,'Elevation (HAE, m)':elev})
rand_coord['Latitude (EPSG 3857, m)'] = rand_coord['Latitude (EPSG 3857, m)'].astype(float)
rand_coord['Longitude (EPSG 3857, m)'] = rand_coord['Longitude (EPSG 3857, m)'].astype(float)
rand_coord['Elevation (HAE, m)'] = rand_coord['Elevation (HAE, m)'].astype(float)
```
## Calculate average coordinates and elevation
With associated 2-sigma uncertainties, and create a GeoDataFrame of the results.
```
Latavg=np.mean(rand_coord['Latitude (EPSG 3857, m)'])
Lat2sd=np.std(rand_coord['Latitude (EPSG 3857, m)'])*2
Lonavg=np.mean(rand_coord['Longitude (EPSG 3857, m)'])
Lon2sd=np.std(rand_coord['Longitude (EPSG 3857, m)'])*2
Havg=np.mean(rand_coord['Elevation (HAE, m)'])
H2sd=np.std(rand_coord['Elevation (HAE, m)'])*2 # 2-sigma, consistent with Lat2sd and Lon2sd
# Create geodataframe with average point values
d={'Processing type': ['Average'],
'Latitude (dec degrees)': [np.nan],
'Longitude (dec degrees)':[np.nan],
'Height above ellipsoid (m)':[Havg],
'Latitude 2-sigma (m)':[Lat2sd],
'Longitude 2-sigma (m)':[Lon2sd],
'Elevation 2-sigma (m)':[H2sd],
'X (m)':[Lonavg],
'Y (m)':[Latavg]}
df1 = pd.DataFrame(data=d)
gdf1 = gpd.GeoDataFrame(df1, geometry=gpd.points_from_xy(df1['X (m)'], df1['Y (m)']))
gdf1.crs='epsg:3857'
gdf1 = gdf1.to_crs('epsg:4326')
gdf1['Longitude (dec degrees)']=gdf1.geometry.x # in EPSG:4326, x is longitude
gdf1['Latitude (dec degrees)']=gdf1.geometry.y # and y is latitude
gdf1 = gdf1.to_crs('epsg:3857')
gdf1
f = plt.figure(figsize=(20,10))
ax1= f.add_subplot(121)
ax2 = f.add_subplot(122)
plt.rcParams["axes.labelsize"] = 15
f.suptitle('Average Latitude: {:.9f} decimal degrees +/- {:.3f} m\nAverage Longitude: {:.9f} decimal degrees +/- {:.3f} m\nAverage elevation: : {:.3f} m +/- {:.3f} m'.format(gdf1['Latitude (dec degrees)'][0],gdf1['Latitude 2-sigma (m)'][0],gdf1['Longitude (dec degrees)'][0],gdf1['Longitude 2-sigma (m)'][0],gdf1['Height above ellipsoid (m)'][0],gdf1['Elevation 2-sigma (m)'][0]), fontsize=20)
# Plot the lat/Lon comparison
graph=sns.kdeplot(x=rand_coord['Longitude (EPSG 3857, m)'], y=rand_coord['Latitude (EPSG 3857, m)'], fill=True, ax=ax1) # 2D KDE of the sampled positions
f = np.linspace(0, 2*pi, 100)
for index, row in gdf.iterrows():
Lon=row['X (m)']
Lon_unc=row['Longitude 2-sigma (m)']
Lat=row['Y (m)']
Lat_unc=row['Latitude 2-sigma (m)']
ax1.plot(Lon+Lon_unc*np.cos(f) , Lat+Lat_unc*np.sin(f),color='k')
#Plot the elevation comparison
sns.distplot(rand_coord["Elevation (HAE, m)"], ax=ax2,hist=False)
for index, row in gdf.iterrows():
elev_min=row['Height above ellipsoid (m)']-row['Elevation 2-sigma (m)']
elev_max=row['Height above ellipsoid (m)']+row['Elevation 2-sigma (m)']
ax2.axvspan(xmin=elev_min, xmax=elev_max, alpha=0.1, color='k')
plt.savefig('GNSS_averaged.svg')
print('Average Latitude: {:.9f} decimal degrees +/- {:.3f} m\nAverage Longitude: {:.9f} decimal degrees +/- {:.3f} m\nAverage elevation: : {:.3f} m +/- {:.3f} m'.format(gdf1['Latitude (dec degrees)'][0],gdf1['Latitude 2-sigma (m)'][0],gdf1['Longitude (dec degrees)'][0],gdf1['Longitude 2-sigma (m)'][0],gdf1['Height above ellipsoid (m)'][0],gdf1['Elevation 2-sigma (m)'][0]))
```
***
## License
This software is released under the MIT license.
Copyright 2020 Alessio Rovere
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
***
# Research funding acknowledgments
This script and associated data were created in the framework of the European Research Council Starting Grant WARMCOASTS (Grant Agreement Number 802414), funded under the European Union's Horizon 2020 research and innovation programme.
***
# Code acknowledgments
https://geopandas.org/docs/user_guide/projections.html
https://stackoverflow.com/questions/49635436/shapely-point-geometry-in-geopandas-df-to-lat-lon-columns
# Working with Time Series
Pandas was developed in the context of financial modeling, so as you might expect, it contains a fairly extensive set of tools for working with dates, times, and time-indexed data.
Date and time data comes in a few flavors, which we will discuss here:
- *Time stamps* reference particular moments in time (e.g., July 4th, 2015 at 7:00am).
- *Time intervals* and *periods* reference a length of time between a particular beginning and end point; for example, the year 2015. Periods usually reference a special case of time intervals in which each interval is of uniform length and does not overlap (e.g., 24 hour-long periods comprising days).
- *Time deltas* or *durations* reference an exact length of time (e.g., a duration of 22.56 seconds).
In this section, we will introduce how to work with each of these types of date/time data in Pandas.
This short section is by no means a complete guide to the time series tools available in Python or Pandas, but instead is intended as a broad overview of how you as a user should approach working with time series.
We will start with a brief discussion of tools for dealing with dates and times in Python, before moving more specifically to a discussion of the tools provided by Pandas.
After listing some resources that go into more depth, we will review some short examples of working with time series data in Pandas.
## Dates and Times in Python
The Python world has a number of available representations of dates, times, deltas, and timespans.
While the time series tools provided by Pandas tend to be the most useful for data science applications, it is helpful to see their relationship to other packages used in Python.
### Native Python dates and times: ``datetime`` and ``dateutil``
Python's basic objects for working with dates and times reside in the built-in ``datetime`` module.
Along with the third-party ``dateutil`` module, you can use it to quickly perform a host of useful functionalities on dates and times.
For example, you can manually build a date using the ``datetime`` type:
```
from datetime import datetime
datetime(year=2015, month=7, day=4)
```
Or, using the ``dateutil`` module, you can parse dates from a variety of string formats:
```
from dateutil import parser
date = parser.parse("4th of July, 2015")
date
```
Once you have a ``datetime`` object, you can do things like printing the day of the week:
```
date.strftime('%A')
```
In the final line, we've used one of the standard string format codes for printing dates (``"%A"``), which you can read about in the [strftime section](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior) of Python's [datetime documentation](https://docs.python.org/3/library/datetime.html).
Documentation of other useful date utilities can be found in [dateutil's online documentation](http://labix.org/python-dateutil).
A related package to be aware of is [``pytz``](http://pytz.sourceforge.net/), which contains tools for working with the most migraine-inducing piece of time series data: time zones.
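For instance, here is a small illustration (the format codes and the time zone chosen are arbitrary) of a few more format codes and a ``pytz``-localized datetime:
```
from datetime import datetime
import pytz

d = datetime(2015, 7, 4, 12, 0)
print(d.strftime('%Y-%m-%d, %A %B'))   # ISO-style date plus weekday and month names

# attach a time zone using pytz
eastern = pytz.timezone('US/Eastern')
print(eastern.localize(d))             # timezone-aware version of the same moment
```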
The power of ``datetime`` and ``dateutil`` lie in their flexibility and easy syntax: you can use these objects and their built-in methods to easily perform nearly any operation you might be interested in.
Where they break down is when you wish to work with large arrays of dates and times:
just as lists of Python numerical variables are suboptimal compared to NumPy-style typed numerical arrays, lists of Python datetime objects are suboptimal compared to typed arrays of encoded dates.
### Typed arrays of times: NumPy's ``datetime64``
The weaknesses of Python's datetime format inspired the NumPy team to add a set of native time series data types to NumPy.
The ``datetime64`` dtype encodes dates as 64-bit integers, and thus allows arrays of dates to be represented very compactly.
The ``datetime64`` requires a very specific input format:
```
import numpy as np
date = np.array('2015-07-04', dtype=np.datetime64)
date
```
Once we have this date formatted, however, we can quickly do vectorized operations on it:
```
date + np.arange(12)
```
Because of the uniform type in NumPy ``datetime64`` arrays, this type of operation can be accomplished much more quickly than if we were working directly with Python's ``datetime`` objects, especially as arrays get large
(we introduced this type of vectorization in *Computation on NumPy Arrays: Universal Functions*).
One detail of the ``datetime64`` and ``timedelta64`` objects is that they are built on a *fundamental time unit*.
Because the ``datetime64`` object is limited to 64-bit precision, the range of encodable times is $2^{64}$ times this fundamental unit.
In other words, ``datetime64`` imposes a trade-off between *time resolution* and *maximum time span*.
For example, if you want a time resolution of one nanosecond, you only have enough information to encode a range of $2^{64}$ nanoseconds, or just under 600 years.
NumPy will infer the desired unit from the input; for example, here is a day-based datetime:
```
np.datetime64('2015-07-04')
```
Here is a minute-based datetime:
```
np.datetime64('2015-07-04 12:00')
```
Notice that the time zone is automatically set to the local time on the computer executing the code.
You can force any desired fundamental unit using one of many format codes; for example, here we'll force a nanosecond-based time:
```
np.datetime64('2015-07-04 12:59:59.50', 'ns')
```
The following table, drawn from the [NumPy datetime64 documentation](http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html), lists the available format codes along with the relative and absolute timespans that they can encode:
|Code | Meaning | Time span (relative) | Time span (absolute) |
|--------|-------------|----------------------|------------------------|
| ``Y`` | Year | ± 9.2e18 years | [9.2e18 BC, 9.2e18 AD] |
| ``M`` | Month | ± 7.6e17 years | [7.6e17 BC, 7.6e17 AD] |
| ``W`` | Week | ± 1.7e17 years | [1.7e17 BC, 1.7e17 AD] |
| ``D`` | Day | ± 2.5e16 years | [2.5e16 BC, 2.5e16 AD] |
| ``h`` | Hour | ± 1.0e15 years | [1.0e15 BC, 1.0e15 AD] |
| ``m`` | Minute | ± 1.7e13 years | [1.7e13 BC, 1.7e13 AD] |
| ``s`` | Second | ± 2.9e12 years | [ 2.9e9 BC, 2.9e9 AD] |
| ``ms`` | Millisecond | ± 2.9e9 years | [ 2.9e6 BC, 2.9e6 AD] |
| ``us`` | Microsecond | ± 2.9e6 years | [290301 BC, 294241 AD] |
| ``ns`` | Nanosecond | ± 292 years | [ 1678 AD, 2262 AD] |
| ``ps`` | Picosecond | ± 106 days | [ 1969 AD, 1970 AD] |
| ``fs`` | Femtosecond | ± 2.6 hours | [ 1969 AD, 1970 AD] |
| ``as`` | Attosecond | ± 9.2 seconds | [ 1969 AD, 1970 AD] |
For the types of data we see in the real world, a useful default is ``datetime64[ns]``, as it can encode a useful range of modern dates with a suitably fine precision.
Finally, we will note that while the ``datetime64`` data type addresses some of the deficiencies of the built-in Python ``datetime`` type, it lacks many of the convenient methods and functions provided by ``datetime`` and especially ``dateutil``.
More information can be found in [NumPy's datetime64 documentation](http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html).
### Dates and times in pandas: best of both worlds
Pandas builds upon all the tools just discussed to provide a ``Timestamp`` object, which combines the ease-of-use of ``datetime`` and ``dateutil`` with the efficient storage and vectorized interface of ``numpy.datetime64``.
From a group of these ``Timestamp`` objects, Pandas can construct a ``DatetimeIndex`` that can be used to index data in a ``Series`` or ``DataFrame``; we'll see many examples of this below.
For example, we can use Pandas tools to repeat the demonstration from above.
We can parse a flexibly formatted string date, and use format codes to output the day of the week:
```
import pandas as pd
date = pd.to_datetime("4th of July, 2015")
date
date.strftime('%A')
```
Additionally, we can do NumPy-style vectorized operations directly on this same object:
```
date + pd.to_timedelta(np.arange(12), 'D')
```
In the next section, we will take a closer look at manipulating time series data with the tools provided by Pandas.
## Pandas Time Series: Indexing by Time
Where the Pandas time series tools really become useful is when you begin to *index data by timestamps*.
For example, we can construct a ``Series`` object that has time indexed data:
```
index = pd.DatetimeIndex(['2014-07-04', '2014-08-04',
'2015-07-04', '2015-08-04'])
data = pd.Series([0, 1, 2, 3], index=index)
data
```
Now that we have this data in a ``Series``, we can make use of any of the ``Series`` indexing patterns we discussed in previous sections, passing values that can be coerced into dates:
```
data['2014-07-04':'2015-07-04']
```
There are additional special date-only indexing operations, such as passing a year to obtain a slice of all data from that year:
```
data['2015']
```
Later, we will see additional examples of the convenience of dates-as-indices.
But first, a closer look at the available time series data structures.
## Pandas Time Series Data Structures
This section will introduce the fundamental Pandas data structures for working with time series data:
- For *time stamps*, Pandas provides the ``Timestamp`` type. As mentioned before, it is essentially a replacement for Python's native ``datetime``, but is based on the more efficient ``numpy.datetime64`` data type. The associated Index structure is ``DatetimeIndex``.
- For *time Periods*, Pandas provides the ``Period`` type. This encodes a fixed-frequency interval based on ``numpy.datetime64``. The associated index structure is ``PeriodIndex``.
- For *time deltas* or *durations*, Pandas provides the ``Timedelta`` type. ``Timedelta`` is a more efficient replacement for Python's native ``datetime.timedelta`` type, and is based on ``numpy.timedelta64``. The associated index structure is ``TimedeltaIndex``.
The most fundamental of these date/time objects are the ``Timestamp`` and ``DatetimeIndex`` objects.
While these class objects can be invoked directly, it is more common to use the ``pd.to_datetime()`` function, which can parse a wide variety of formats.
Passing a single date to ``pd.to_datetime()`` yields a ``Timestamp``; passing a series of dates by default yields a ``DatetimeIndex``:
```
dates = pd.to_datetime([datetime(2015, 7, 3), '4th of July, 2015',
'2015-Jul-6', '07-07-2015', '20150708'])
dates
```
Any ``DatetimeIndex`` can be converted to a ``PeriodIndex`` with the ``to_period()`` function with the addition of a frequency code; here we'll use ``'D'`` to indicate daily frequency:
```
dates.to_period('D')
```
A ``TimedeltaIndex`` is created, for example, when a date is subtracted from another:
```
dates - dates[0]
```
### Regular sequences: ``pd.date_range()``
To make the creation of regular date sequences more convenient, Pandas offers a few functions for this purpose: ``pd.date_range()`` for timestamps, ``pd.period_range()`` for periods, and ``pd.timedelta_range()`` for time deltas.
We've seen that Python's ``range()`` and NumPy's ``np.arange()`` turn a startpoint, endpoint, and optional stepsize into a sequence.
Similarly, ``pd.date_range()`` accepts a start date, an end date, and an optional frequency code to create a regular sequence of dates.
By default, the frequency is one day:
```
pd.date_range('2015-07-03', '2015-07-10')
```
Alternatively, the date range can be specified not with a start and endpoint, but with a startpoint and a number of periods:
```
pd.date_range('2015-07-03', periods=8)
```
The spacing can be modified by altering the ``freq`` argument, which defaults to ``D``.
For example, here we will construct a range of hourly timestamps:
```
pd.date_range('2015-07-03', periods=8, freq='H')
```
To create regular sequences of ``Period`` or ``Timedelta`` values, the very similar ``pd.period_range()`` and ``pd.timedelta_range()`` functions are useful.
Here are some monthly periods:
```
pd.period_range('2015-07', periods=8, freq='M')
```
And a sequence of durations increasing by an hour:
```
pd.timedelta_range(0, periods=10, freq='H')
```
All of these require an understanding of Pandas frequency codes, which we'll summarize in the next section.
## Frequencies and Offsets
Fundamental to these Pandas time series tools is the concept of a frequency or date offset.
Just as we saw the ``D`` (day) and ``H`` (hour) codes above, we can use such codes to specify any desired frequency spacing.
The following table summarizes the main codes available:
| Code | Description | Code | Description |
|--------|---------------------|--------|----------------------|
| ``D`` | Calendar day | ``B`` | Business day |
| ``W`` | Weekly | | |
| ``M`` | Month end | ``BM`` | Business month end |
| ``Q`` | Quarter end | ``BQ`` | Business quarter end |
| ``A`` | Year end | ``BA`` | Business year end |
| ``H`` | Hours | ``BH`` | Business hours |
| ``T`` | Minutes | | |
| ``S`` | Seconds | | |
| ``L``  | Milliseconds        |        |                      |
| ``U``  | Microseconds        |        |                      |
| ``N``  | Nanoseconds         |        |                      |
The monthly, quarterly, and annual frequencies are all marked at the end of the specified period.
By adding an ``S`` suffix to any of these, they instead will be marked at the beginning:
| Code    | Description            | Code    | Description            |
|---------|------------------------|---------|------------------------|
| ``MS``  | Month start            | ``BMS`` | Business month start   |
| ``QS``  | Quarter start          | ``BQS`` | Business quarter start |
| ``AS``  | Year start             | ``BAS`` | Business year start    |
Additionally, you can change the month used to mark any quarterly or annual code by adding a three-letter month code as a suffix:
- ``Q-JAN``, ``BQ-FEB``, ``QS-MAR``, ``BQS-APR``, etc.
- ``A-JAN``, ``BA-FEB``, ``AS-MAR``, ``BAS-APR``, etc.
In the same way, the split-point of the weekly frequency can be modified by adding a three-letter weekday code:
- ``W-SUN``, ``W-MON``, ``W-TUE``, ``W-WED``, etc.
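For instance (dates chosen arbitrarily), these anchored codes can be passed to ``pd.date_range()`` just like the plain ones:
```
# weekly frequency anchored on Mondays
pd.date_range('2015-07-01', periods=4, freq='W-MON')

# quarter-start frequency anchored to February (quarters begin in Feb, May, Aug, Nov)
pd.date_range('2015-01-01', periods=4, freq='QS-FEB')
```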
On top of this, codes can be combined with numbers to specify other frequencies.
For example, for a frequency of 2 hours 30 minutes, we can combine the hour (``H``) and minute (``T``) codes as follows:
```
pd.timedelta_range(0, periods=9, freq="2H30T")
```
All of these short codes refer to specific instances of Pandas time series offsets, which can be found in the ``pd.tseries.offsets`` module.
For example, we can create a business day offset directly as follows:
```
from pandas.tseries.offsets import BDay
pd.date_range('2015-07-01', periods=5, freq=BDay())
```
For more discussion of the use of frequencies and offsets, see the ["DateOffset" section](http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects) of the Pandas documentation.
## Resampling, Shifting, and Windowing
The ability to use dates and times as indices to intuitively organize and access data is an important piece of the Pandas time series tools.
The benefits of indexed data in general (automatic alignment during operations, intuitive data slicing and access, etc.) still apply, and Pandas provides several additional time series-specific operations.
We will take a look at a few of those here, using some stock price data as an example.
Because Pandas was developed largely in a finance context, it includes some very specific tools for financial data.
For example, the accompanying ``pandas-datareader`` package (installable via ``conda install pandas-datareader``), knows how to import financial data from a number of available sources, including Yahoo finance, Google Finance, and others.
Here we will load Google's closing price history:
```
#!pip install pandas-datareader
from pandas_datareader import data
goog = data.DataReader('GOOG', start='2004', end='2016', data_source='google')
goog.head()
```
For simplicity, we'll use just the closing price:
```
goog = goog['Close']
```
We can visualize this using the ``plot()`` method, after the normal Matplotlib setup boilerplate (see *Introduction To Matplotlib*):
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set()
goog.plot();
```
### Resampling and converting frequencies
One common need for time series data is resampling at a higher or lower frequency.
This can be done using the ``resample()`` method, or the much simpler ``asfreq()`` method.
The primary difference between the two is that ``resample()`` is fundamentally a *data aggregation*, while ``asfreq()`` is fundamentally a *data selection*.
Taking a look at the Google closing price, let's compare what the two return when we down-sample the data.
Here we will resample the data at the end of business year:
```
goog.plot(alpha=0.5, style='-')
goog.resample('BA').mean().plot(style=':')
goog.asfreq('BA').plot(style='--');
plt.legend(['input', 'resample', 'asfreq'],
loc='upper left');
```
Notice the difference: at each point, ``resample`` reports the *average of the previous year*, while ``asfreq`` reports the *value at the end of the year*.
For up-sampling, ``resample()`` and ``asfreq()`` are largely equivalent, though resample has many more options available.
In this case, the default for both methods is to leave the up-sampled points empty, that is, filled with NA values.
Just as with the ``pd.fillna()`` function discussed previously, ``asfreq()`` accepts a ``method`` argument to specify how values are imputed.
Here, we will resample the business day data at a daily frequency (i.e., including weekends):
```
fig, ax = plt.subplots(2, sharex=True)
data = goog.iloc[:10]
data.asfreq('D').plot(ax=ax[0], marker='o')
data.asfreq('D', method='bfill').plot(ax=ax[1], style='-o')
data.asfreq('D', method='ffill').plot(ax=ax[1], style='--o')
ax[1].legend(["back-fill", "forward-fill"]);
```
The top panel is the default: non-business days are left as NA values and do not appear on the plot.
The bottom panel shows the differences between two strategies for filling the gaps: forward-filling and backward-filling.
### Time-shifts
Another common time series-specific operation is shifting of data in time.
Pandas has two closely related methods for computing this: ``shift()`` and ``tshift()``.
In short, the difference between them is that ``shift()`` *shifts the data*, while ``tshift()`` *shifts the index*.
In both cases, the shift is specified in multiples of the frequency.
Here we will both ``shift()`` and ``tshift()`` by 900 days:
```
fig, ax = plt.subplots(3, sharey=True)
# apply a frequency to the data
goog = goog.asfreq('D', method='pad')
goog.plot(ax=ax[0])
goog.shift(900).plot(ax=ax[1])
goog.tshift(900).plot(ax=ax[2])
# legends and annotations
local_max = pd.to_datetime('2007-11-05')
offset = pd.Timedelta(900, 'D')
ax[0].legend(['input'], loc=2)
ax[0].get_xticklabels()[2].set(weight='heavy', color='red')
ax[0].axvline(local_max, alpha=0.3, color='red')
ax[1].legend(['shift(900)'], loc=2)
ax[1].get_xticklabels()[2].set(weight='heavy', color='red')
ax[1].axvline(local_max + offset, alpha=0.3, color='red')
ax[2].legend(['tshift(900)'], loc=2)
ax[2].get_xticklabels()[1].set(weight='heavy', color='red')
ax[2].axvline(local_max + offset, alpha=0.3, color='red');
```
We see here that ``shift(900)`` shifts the *data* by 900 days, pushing some of it off the end of the graph (and leaving NA values at the other end), while ``tshift(900)`` shifts the *index values* by 900 days.
A common context for this type of shift is in computing differences over time. For example, we use shifted values to compute the one-year return on investment for Google stock over the course of the dataset:
```
ROI = 100 * (goog.tshift(-365) / goog - 1)
ROI.plot()
plt.ylabel('% Return on Investment');
```
This helps us to see the overall trend in Google stock: thus far, the most profitable times to invest in Google have been (unsurprisingly, in retrospect) shortly after its IPO, and in the middle of the 2009 recession.
### Rolling windows
Rolling statistics are a third type of time series-specific operation implemented by Pandas.
These can be accomplished via the ``rolling()`` attribute of ``Series`` and ``DataFrame`` objects, which returns a view similar to what we saw with the ``groupby`` operation (see *Aggregation and Grouping*).
This rolling view makes available a number of aggregation operations by default.
For example, here is the one-year centered rolling mean and standard deviation of the Google stock prices:
```
rolling = goog.rolling(365, center=True)
data = pd.DataFrame({'input': goog,
'one-year rolling_mean': rolling.mean(),
'one-year rolling_std': rolling.std()})
ax = data.plot(style=['-', '--', ':'])
ax.lines[0].set_alpha(0.3)
```
As with group-by operations, the ``aggregate()`` and ``apply()`` methods can be used for custom rolling computations.
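For example (a minimal sketch reusing the ``rolling`` object defined above), we can compute several statistics at once or apply a custom function to each window:
```
# several aggregations over the same one-year window
rolling.aggregate(['mean', 'std', 'max']).tail()

# a custom statistic: the price range (max - min) within each window
goog.rolling(365, center=True).apply(lambda x: x.max() - x.min()).plot()
plt.ylabel('one-year price range');
```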
## Where to Learn More
This section has provided only a brief summary of some of the most essential features of time series tools provided by Pandas; for a more complete discussion, you can refer to the ["Time Series/Date" section](http://pandas.pydata.org/pandas-docs/stable/timeseries.html) of the Pandas online documentation.
Another excellent resource is the textbook [Python for Data Analysis](http://shop.oreilly.com/product/0636920023784.do) by Wes McKinney (O'Reilly, 2012).
Although it is now a few years old, it is an invaluable resource on the use of Pandas.
In particular, this book emphasizes time series tools in the context of business and finance, and focuses much more on particular details of business calendars, time zones, and related topics.
As always, you can also use the IPython help functionality to explore and try further options available to the functions and methods discussed here. I find this often is the best way to learn a new Python tool.
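For example, from the notebook you might pull up the documentation of the methods used above like this (a quick illustration):
```
# In IPython/Jupyter you can append `?` to any object, e.g. `goog.resample?`;
# plain Python's help() works anywhere:
help(pd.Series.resample)
help(pd.Series.rolling)
```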
## Example: Visualizing Seattle Bicycle Counts
As a more involved example of working with some time series data, let's take a look at bicycle counts on Seattle's [Fremont Bridge](http://www.openstreetmap.org/#map=17/47.64813/-122.34965).
This data comes from an automated bicycle counter, installed in late 2012, which has inductive sensors on the east and west sidewalks of the bridge.
The hourly bicycle counts can be downloaded from http://data.seattle.gov/; here is the [direct link to the dataset](https://data.seattle.gov/Transportation/Fremont-Bridge-Hourly-Bicycle-Counts-by-Month-Octo/65db-xm6k).
As of summer 2016, the CSV can be downloaded as follows:
```
# !curl -o FremontBridge.csv https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD
```
Once this dataset is downloaded, we can use Pandas to read the CSV output into a ``DataFrame``.
We will specify that we want the Date as an index, and we want these dates to be automatically parsed:
```
data = pd.read_csv('data/FremontBridge.csv', index_col='Date', parse_dates=True)
data.head()
```
For convenience, we'll further process this dataset by shortening the column names and adding a "Total" column:
```
data.columns = ['West', 'East']
data['Total'] = data.eval('West + East')
```
Now let's take a look at the summary statistics for this data:
```
data.dropna().describe()
```
### Visualizing the data
We can gain some insight into the dataset by visualizing it.
Let's start by plotting the raw data:
```
%matplotlib inline
import seaborn; seaborn.set()
data.plot()
plt.ylabel('Hourly Bicycle Count');
```
The ~25,000 hourly samples are far too dense for us to make much sense of.
We can gain more insight by resampling the data to a coarser grid.
Let's resample by week:
```
weekly = data.resample('W').sum()
weekly.plot(style=[':', '--', '-'])
plt.ylabel('Weekly bicycle count');
```
This shows us some interesting seasonal trends: as you might expect, people bicycle more in the summer than in the winter, and even within a particular season the bicycle use varies from week to week (likely dependent on weather; see *In Depth: Linear Regression*, where we explore this further).
Another way that comes in handy for aggregating the data is a rolling mean, computed with the ``rolling()`` method we saw earlier (the older ``pd.rolling_mean()`` function is deprecated in favor of it).
Here we'll aggregate over a 30-day centered rolling window of the daily totals:
```
daily = data.resample('D').sum()
daily.rolling(30, center=True).sum().plot(style=[':', '--', '-'])
plt.ylabel('mean hourly count');
```
The jaggedness of the result is due to the hard cutoff of the window.
We can get a smoother version of a rolling mean using a window function–for example, a Gaussian window.
The following code specifies both the width of the window (we chose 50 days) and the width of the Gaussian within the window (we chose 10 days):
```
daily.rolling(50, center=True,
win_type='gaussian').sum(std=10).plot(style=[':', '--', '-']);
```
### Digging into the data
While these smoothed data views are useful to get an idea of the general trend in the data, they hide much of the interesting structure.
For example, we might want to look at the average traffic as a function of the time of day.
We can do this using the GroupBy functionality discussed in [Aggregation and Grouping](03.08-Aggregation-and-Grouping.ipynb):
```
by_time = data.groupby(data.index.time).mean()
hourly_ticks = 4 * 60 * 60 * np.arange(6)
by_time.plot(xticks=hourly_ticks, style=[':', '--', '-']);
```
The hourly traffic is a strongly bimodal distribution, with peaks around 8:00 in the morning and 5:00 in the evening.
This is likely evidence of a strong component of commuter traffic crossing the bridge.
This is further evidenced by the differences between the western sidewalk (generally used going toward downtown Seattle), which peaks more strongly in the morning, and the eastern sidewalk (generally used going away from downtown Seattle), which peaks more strongly in the evening.
We also might be curious about how things change based on the day of the week. Again, we can do this with a simple groupby:
```
by_weekday = data.groupby(data.index.dayofweek).mean()
by_weekday.index = ['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']
by_weekday.plot(style=[':', '--', '-']);
```
This shows a strong distinction between weekday and weekend totals, with around twice as many average riders crossing the bridge on Monday through Friday than on Saturday and Sunday.
With this in mind, let's do a compound GroupBy and look at the hourly trend on weekdays versus weekends.
We'll start by grouping by both a flag marking the weekend, and the time of day:
```
weekend = np.where(data.index.weekday < 5, 'Weekday', 'Weekend')
by_time = data.groupby([weekend, data.index.time]).mean()
```
Now we'll use some of the Matplotlib tools described in *Multiple Subplots* to plot two panels side by side:
```
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(14, 5))
by_time.loc['Weekday'].plot(ax=ax[0], title='Weekdays',
                            xticks=hourly_ticks, style=[':', '--', '-'])
by_time.loc['Weekend'].plot(ax=ax[1], title='Weekends',
                            xticks=hourly_ticks, style=[':', '--', '-']);
```
The result is very interesting: we see a bimodal commute pattern during the work week, and a unimodal recreational pattern during the weekends.
It would be interesting to dig through this data in more detail, and examine the effect of weather, temperature, time of year, and other factors on people's commuting patterns.
We will also revisit this dataset in the context of modeling in *In Depth: Linear Regression*.
```
# Finnish words for the digits 1-10
FINNISH_NUM2WORDS = {
1: 'yksi',
2: 'kaksi',
3: 'kolme',
4: 'neljä',
5: 'viisi',
6: 'kuusi',
7: 'seitsemän',
8: 'kahdeksan',
9: 'yhdeksän',
10: 'kymmenen'
}
# Scale words for each group of three digits: key 0 is the ones group, 1 the thousands,
# 2 the millions, and so on; 'singular' is used when the group equals 1
PLACE_WORDS = {
0: {'plural': '', 'singular': ''},
1: {'plural': 'tuhatta', 'singular': 'tuhat'},
2: {'plural': 'miljoonaa', 'singular': 'miljoona'},
3: {'plural': 'miljardia', 'singular': 'miljardi'},
4: {'plural': 'biljoonaa', 'singular': 'biljoona'},
5: {'plural': 'biljardia', 'singular': 'biljardi'},
6: {'plural': 'triljoonaa', 'singular': 'triljoona'},
7: {'plural': 'triljardia', 'singular': 'triljardi'},
8: {'plural': 'kvadriljoonaa', 'singular': 'kvadriljoona'},
9: {'plural': 'kvadriljardia', 'singular': 'kvadriljardi'},
10: {'plural': 'kvintiljoonaa', 'singular': 'kvintiljoona'},
11: {'plural': 'kvintiljardia', 'singular': 'kvintiljardi'},
12: {'plural': 'sekstiljoonaa', 'singular': 'sekstiljoona'},
13: {'plural': 'sekstiljardia', 'singular': 'sekstiljardi'},
14: {'plural': 'septiljoonaa', 'singular': 'septiljoona'},
15: {'plural': 'septiljardia', 'singular': 'septiljardi'},
16: {'plural': 'oktiljoonaa', 'singular': 'oktiljoona'},
17: {'plural': 'oktiljardia', 'singular': 'oktiljardi'},
18: {'plural': 'noviljoonaa', 'singular': 'noviljoona'},
19: {'plural': 'noviljardia', 'singular': 'noviljardi'},
20: {'plural': 'dekiljoonaa', 'singular': 'dekiljoona'},
21: {'plural': 'dekiljardia', 'singular': 'dekiljardi'},
22: {'plural': 'undekiljoonaa', 'singular': 'undekiljoona'},
23: {'plural': 'undekiljardia', 'singular': 'undekiljardi'},
24: {'plural': 'duodekiljoonaa', 'singular': 'duodekiljoona'},
25: {'plural': 'duodekiljardia', 'singular': 'duodekiljardi'},
26: {'plural': 'tredekiljoonaa', 'singular': 'tredekiljoona'},
27: {'plural': 'tredekiljardia', 'singular': 'tredekiljardi'},
28: {'plural': 'kvattuordekiljoonaa', 'singular': 'kvattuordekiljoona'},
29: {'plural': 'kvattuordekiljardia', 'singular': 'kvattuordekiljardi'},
30: {'plural': 'kvindekiljoonaa', 'singular': 'kvindekiljoona'},
31: {'plural': 'kvindekiljardia', 'singular': 'kvindekiljardi'},
32: {'plural': 'sedekiljoonaa', 'singular': 'sedekiljoona'},
33: {'plural': 'sedekiljardia', 'singular': 'sedekiljardi'},
34: {'plural': 'septendekiljoonaa', 'singular': 'septendekiljoona'},
35: {'plural': 'septendekiljardia', 'singular': 'septendekiljardi'},
36: {'plural': 'duodevigintiljoonaa', 'singular': 'duodevigintiljoona'},
37: {'plural': 'duodevigintiljardia', 'singular': 'duodevigintiljardi'}
}
def tens(number):
    # 20-99 -> '<digit>kymmentä' (units are appended by the caller),
    # 11-19 -> '<digit>toista', 10 -> 'kymmenen'
    if number > 19:
        return f'{FINNISH_NUM2WORDS[number//10]}kymmentä'
    elif number > 10:
        return f'{FINNISH_NUM2WORDS[number-10]}toista'
    else:
        return f'{FINNISH_NUM2WORDS[number]}'
def singles(number):
    # Single digit as a word; empty string for 0
    if number != 0:
        return f'{FINNISH_NUM2WORDS[number]}'
    return ''
def hundreds(number):
    # 'sata' for 100, '<digit>sataa' for 200-900
    if number > 1:
        return f'{FINNISH_NUM2WORDS[number]}sataa'
    else:
        return 'sata'
def thousands(number):
    # 'tuhat' for 1000, '<digit>tuhatta' otherwise (unused: build_string relies on PLACE_WORDS)
    if number > 1:
        return f'{FINNISH_NUM2WORDS[number]}tuhatta'
    else:
        return 'tuhat'
def build_string(digits, place):
    # Spell one group of up to three digits and append its scale word;
    # `place` indexes PLACE_WORDS (0 = ones group, 1 = thousands, ...)
    len_num = len(digits)
    num_string = ''
is_between_10_and_19 = int(digits[-2:]) > 9 and int(digits[-2:]) < 20
is_divisible_by_10 = int(digits) % 10 == 0
is_place_gt_0_and_number_1 = int(digits) == 1 and place > 0
if not is_between_10_and_19 and not is_divisible_by_10 and not is_place_gt_0_and_number_1:
num_string = f'{singles(int(digits[-1]))}'
if len_num >= 2 and int(digits[-2:]) > 9 and int(digits) % 100 != 0:
num_string = f'{tens(int(digits[-2:]))}{num_string}'
if len_num >= 3 and int(digits) > 99:
num_string = f'{hundreds(int(digits[-3]))}{num_string}'
if len_num >= 1 and int(digits) >= 1:
type = 'singular' if int(digits) == 1 else 'plural'
num_string = f'{num_string}{PLACE_WORDS[place][type]}'
return num_string
def spell_number(num):
    # Spell a non-negative integer by processing it in groups of three digits,
    # from least to most significant
    str_num = str(num)
    len_num = len(str_num)
offset = 1
splits = (len_num // 3) + 1
num_part = str_num[-3:len(str_num)]
num_str = f''
while num_part:
num_str = f'{build_string(num_part, offset - 1)}{num_str}'
splits -= 1
offset += 1
num_part = str_num[-3*offset:-3*(offset-1)]
return num_str
```
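A few quick spot checks of the converter (these example calls are added here for illustration):
```
print(spell_number(21))    # kaksikymmentäyksi
print(spell_number(1001))  # tuhatyksi
print(spell_number(2023))  # kaksituhattakaksikymmentäkolme
```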
**Run the following two cells before you begin.**
```
%autosave 10
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.pipeline import Pipeline
import seaborn as sns
%matplotlib inline
```
**First, import the cleaned data set. Then, select the features from the DataFrame of the case study data.**
These features should be: `'LIMIT_BAL'`, `'EDUCATION'`, `'MARRIAGE'`, `'AGE'`, `'PAY_1'`, `'BILL_AMT1'`, `'BILL_AMT2'`, `'BILL_AMT3'`, `'BILL_AMT4'`, `'BILL_AMT5'`, `'BILL_AMT6'`, `'PAY_AMT1'`, `'PAY_AMT2'`, `'PAY_AMT3'`, `'PAY_AMT4'`, `'PAY_AMT5'`, AND `'PAY_AMT6'`.
```
# Import data set
data = pd.read_csv('cleaned_data.csv')
# Create features list
feat=['LIMIT_BAL', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_1', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5','PAY_AMT6']
```
_____________________________________________________
**Next, make an 80:20 train/test split using a random seed of 24.**
```
X = data[feat].values
y = data['default payment next month'].values
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.20, random_state=24)
```
_____________________________________________________
**Then, instantiate the `MinMaxScaler` to scale the data.**
```
minmax = MinMaxScaler()
```
_____________________________________________________
**Next, instantiate a logistic regression model with the `saga` solver, L1 penalty, and set `max_iter` to 1,000 as we want the solver to have enough iterations to find a good solution.**
```
lr = LogisticRegression(solver='saga',max_iter=1000,penalty='l1')
```
_____________________________________________________
**Next, import the `Pipeline` class and create a `Pipeline` with the scaler and the logistic regression model, using the names `'scaler'` and `'model'` for the steps, respectively.**
```
pip = Pipeline(steps=[('scaler',minmax),('model',lr)])
```
_____________________________________________________
**Now, use the `get_params` method to view the parameters from each stage of the pipeline.**
```
# Use `get_params`
pip.get_params()
```
**Use the `set_params` method to change the `model__C` parameter to 2.**
```
# View what `model__C` is set to currently
pip.get_params()['model__C']
# Change `model__C` to 2
pip.set_params(model__C=2)
```
_____________________________________________________
**Then, create a smaller range of C values to test with cross-validation, as these models will take longer to train and test with more data than our previous activities.**
**Use C_vals = [$10^2$, $10$, $1$, $10^{-1}$, $10^{-2}$, $10^{-3}$].**
<details>
<summary>Hint:</summary>
Recall that exponents in Python use the ** operator.
</details>
```
C_vals = [10**2, 10, 1,0.1,0.01,0.001]
```
Now, define `k_folds` using `StratifiedKFold`. The number of folds should be 4. Set the random state to 1.
```
k_folds = StratifiedKFold(n_splits=4, shuffle=True, random_state=1)  # shuffle so random_state takes effect
```
_____________________________________________________
**Next, make a new version of the `cross_val_C_search` function, called `cross_val_C_search_pipe`. Instead of the model argument, this function will take a pipeline argument. The changes inside the function will be to set the `C` value using `set_params(model__C = <value you want to test>)` on the pipeline, replacing the model with the pipeline for the fit and `predict_proba` methods, and accessing the `C` value using `pipeline.get_params()['model__C']` for the printed status update.**
```
def avg(pipeline,k_fold,X,Y):
training=[]
testing=[]
for train_index, test_index in k_fold.split(X, Y):
#Subset the features and response, for training and testing data for
#this fold
X_cv_train, X_cv_test = X[train_index], X[test_index]
y_cv_train, y_cv_test = Y[train_index], Y[test_index]
#Fit the model on the training data
pipeline.fit(X_cv_train, y_cv_train)
#Get the training ROC AUC
y_cv_train_predict_proba = pipeline.predict_proba(X_cv_train)
training.append(roc_auc_score(y_cv_train, y_cv_train_predict_proba[:,1]))
#Get the testing ROC AUC
y_cv_test_predict_proba = pipeline.predict_proba(X_cv_test)
testing.append(roc_auc_score(y_cv_test, y_cv_test_predict_proba[:,1]))
a=[np.mean(training),np.mean(testing)]
return a
def cross_val_C_search_pipe(k_folds, C_vals, pipeline, X, Y):
cv_train_roc_auc = []
cv_test_roc_auc = []
for c_val_counter in range(len(C_vals)):
#Set the C value for the model object
pipeline.set_params(model__C = C_vals[c_val_counter])
#Count folds for each value of C
#Get training and testing indices for each fold
b,c=avg(pipeline,k_folds,X,Y)
cv_train_roc_auc.append(b)
cv_test_roc_auc.append(c)
#Indicate progress
        print(f"Done with C = {pipeline.get_params()['model__C']}")
return cv_train_roc_auc, cv_test_roc_auc
tra,tes = cross_val_C_search_pipe(k_folds,C_vals,pip,X_train,y_train)
```
_____________________________________________________
**Now, run this function as in the previous activity, but using the new range of `C` values, the pipeline you created, and the features and response variable from the training split of the case study data.**
You may see warnings here, or in later steps, about non-convergence of the solver; you could experiment with the `tol` or `max_iter` options to try to achieve convergence, although the results you obtain with `max_iter = 1000` are likely to be sufficient.
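If you do want to experiment with those options, they can be set through the pipeline in the same way as `C` (an optional sketch; the values below are arbitrary):
```
# Loosen the convergence tolerance and/or allow more iterations for the saga solver
pip.set_params(model__tol=1e-2, model__max_iter=2000)
```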
_____________________________________________________
**Plot the average training and testing ROC AUC across folds, for each `np.log10(C_vals)` value.**
```
plt.figure(figsize=(10,6))
plt.plot(np.log10(C_vals),tra, marker='o', markersize=10,label='train')
plt.plot(np.log10(C_vals),tes, marker='x', markersize=10,label='test')
plt.title('Cross validation')
plt.xlabel('log10(C)')
plt.ylabel('ROC AUC')
plt.legend()
```
_____________________________________________________
**Up next, create interaction features for the case study data using scikit-learn's `PolynomialFeatures`. You should use 2 as the degree of polynomial features. Confirm that the number of new features makes sense.**
```
poly= PolynomialFeatures(degree=2, interaction_only=True, include_bias=False)
data_poly=poly.fit_transform(data[feat])
data_poly.shape,y.shape
```
_____________________________________________________
**Finally, repeat the cross-validation procedure and observe the model performance now.**
```
# Using the new features, make an 80:20 train/test split using a random seed of 24.
X_train_p, X_test_p, y_train_p, y_test_p = train_test_split(data_poly, y, test_size=0.20, random_state=24)
# Call the cross_val_C_search_pipe() function using the new training data.
# All other parameters should remain the same.
# Note that this training may take a few minutes due to the larger number of features.
tra_p,tes_p=cross_val_C_search_pipe(k_folds,C_vals,pip,X_train_p,y_train_p)
# Plot the average training and testing ROC AUC across folds, for each C value.
plt.figure(figsize=(10,6))
plt.plot(np.log10(C_vals),tra_p, marker='^', markersize=10,label='train')
plt.plot(np.log10(C_vals),tes_p, marker='x', markersize=10,label='test')
plt.title('Cross validation')
plt.xlabel('log10(C)')
plt.ylabel('ROC AUC')
plt.legend()
```
**Take a look at the above graph. Does the average cross-validation testing performance improve with the interaction features? Is regularization useful?**
```
#Yes IT IS USEFUL
```
# A Simple Autoencoder
We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.

In this notebook, we'll build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.
```
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
```
Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
```
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
```
We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a **single ReLU hidden layer**. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a **sigmoid activation on the output layer** to get values matching the input.

> **Exercise:** Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. Feel free to use TensorFlow's higher level API, `tf.layers`. For instance, you would use [`tf.layers.dense(inputs, units, activation=tf.nn.relu)`](https://www.tensorflow.org/api_docs/python/tf/layers/dense) to create a fully connected layer with a ReLU activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this `tf.nn.sigmoid_cross_entropy_with_logits` ([documentation](https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits)). You should note that `tf.nn.sigmoid_cross_entropy_with_logits` takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function.
```
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
img_size_1d = mnist.train.images.shape[1]
# note: with the default reshape=True, images are loaded as flattened 784-vectors
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32, [None, img_size_1d])
targets_ = tf.placeholder(tf.float32, [None, img_size_1d])
# Output of hidden layer, single fully connected layer here with ReLU activation
encoded = tf.layers.dense(inputs_, units= encoding_dim, activation=tf.nn.relu)
# Output layer logits, fully connected layer with no activation
logits = tf.layers.dense(encoded, units=img_size_1d, activation=None)
# Sigmoid output from logits
decoded = tf.nn.sigmoid(logits)
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets_)
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
```
## Training
```
# Create the session
sess = tf.Session()
```
Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Calling `mnist.train.next_batch(batch_size)` will return a tuple of `(images, labels)`. We're not concerned with the labels here; we just need the images. Otherwise this is pretty straightforward training with TensorFlow. We initialize the variables with `sess.run(tf.global_variables_initializer())`. Then we run the optimizer and get the loss with `batch_cost, _ = sess.run([cost, opt], feed_dict=feed)`.
```
epochs = 20
batch_size = 200
disp_steps = 50
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size) # note: x, y = batch = mnist.train.next_batch; x = batch[0]
feed = {inputs_: batch[0], targets_: batch[0]} # note: input and target are both the same
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
if ii % disp_steps == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
```
## Checking out the results
Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.
```
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
print(axes.shape)  # (2, 10): a row of originals and a row of reconstructions
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
```
## Up Next
We're dealing with images here, so we can (usually) get better performance using convolution layers. So, next we'll build a better autoencoder with convolutional layers.
In practice, autoencoders aren't actually better at compression compared to typical methods like JPEGs and MP3s. But, they are being used for noise reduction, which you'll also build.
## The data block API
```
from fastai.gen_doc.nbdoc import *
from fastai.basics import *
np.random.seed(42)
```
The data block API lets you customize the creation of a [`DataBunch`](/basic_data.html#DataBunch) by isolating the underlying parts of that process in separate blocks, mainly:
1. Where are the inputs and how to create them?
1. How to split the data into a training and validation sets?
1. How to label the inputs?
1. What transforms to apply?
1. How to add a test set?
1. How to wrap in dataloaders and create the [`DataBunch`](/basic_data.html#DataBunch)?
Each of these may be addressed with a specific block designed for your unique setup. Your inputs might be in a folder, a csv file, or a dataframe. You may want to split them randomly, by certain indices or depending on the folder they are in. You can have your labels in your csv file or your dataframe, but it may come from folders or a specific function of the input. You may choose to add data augmentation or not. A test set is optional too. Finally you have to set the arguments to put the data together in a [`DataBunch`](/basic_data.html#DataBunch) (batch size, collate function...)
The data block API is called as such because you can mix and match each one of those blocks with the others, allowing total flexibility to create your customized [`DataBunch`](/basic_data.html#DataBunch) for training, validation and testing. The factory methods of the various [`DataBunch`](/basic_data.html#DataBunch) are great for beginners but you can't always make your data fit in the tracks they require.
<img src="imgs/mix_match.png" alt="Mix and match" width="200">
As usual, we'll begin with end-to-end examples, then switch to the details of each of those parts.
## Examples of use
Let's begin with our traditional MNIST example.
```
from fastai.vision import *
path = untar_data(URLs.MNIST_TINY)
tfms = get_transforms(do_flip=False)
path.ls()
(path/'train').ls()
```
In [`vision.data`](/vision.data.html#vision.data), we can create a [`DataBunch`](/basic_data.html#DataBunch) suitable for image classification by simply typing:
```
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=64)
```
This is a shortcut method which is aimed at data that is in folders following an ImageNet style, with the [`train`](/train.html#train) and `valid` directories, each containing one subdirectory per class, where all the labelled pictures are. There is also a `test` directory containing unlabelled pictures.
Here is the same code, but this time using the data block API, which can work with any style of a dataset. All the stages, which will be explained below, can be grouped together like this:
```
data = (ImageList.from_folder(path) #Where to find the data? -> in path and its subfolders
.split_by_folder() #How to split in train/valid? -> use the folders
.label_from_folder() #How to label? -> depending on the folder of the filenames
.add_test_folder() #Optionally add a test set (here default name is test)
.transform(tfms, size=64) #Data augmentation? -> use tfms with a size of 64
.databunch()) #Finally? -> use the defaults for conversion to ImageDataBunch
```
Now we can look at the created DataBunch:
```
data.show_batch(3, figsize=(6,6), hide_axis=False)
```
Let's look at another example from [`vision.data`](/vision.data.html#vision.data) with the planet dataset. This time, it's a multiclassification problem with the labels in a csv file and no given split between valid and train data, so we use a random split. The factory method is:
```
planet = untar_data(URLs.PLANET_TINY)
planet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
pd.read_csv(planet/"labels.csv").head()
data = ImageDataBunch.from_csv(planet, folder='train', size=128, suffix='.jpg', label_delim = ' ', ds_tfms=planet_tfms)
```
With the data block API we can rewrite this like that:
```
planet.ls()
pd.read_csv(planet/"labels.csv").head()
data = (ImageList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg')
#Where to find the data? -> in planet 'train' folder
.split_by_rand_pct()
#How to split in train/valid? -> randomly with the default 20% in valid
.label_from_df(label_delim=' ')
#How to label? -> use the second column of the csv file and split the tags by ' '
.transform(planet_tfms, size=128)
#Data augmentation? -> use tfms with a size of 128
.databunch())
#Finally -> use the defaults for conversion to databunch
data.show_batch(rows=2, figsize=(9,7))
```
The data block API also allows you to get your data together in problems for which there is no direct [`ImageDataBunch`](/vision.data.html#ImageDataBunch) factory method. For a segmentation task, for instance, we can use it to quickly get a [`DataBunch`](/basic_data.html#DataBunch). Let's take the example of the [camvid dataset](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/). The images are in an 'images' folder and their corresponding mask is in a 'labels' folder.
```
camvid = untar_data(URLs.CAMVID_TINY)
path_lbl = camvid/'labels'
path_img = camvid/'images'
```
We have a file that gives us the names of the classes (what each code inside the masks corresponds to: a pedestrian, a tree, a road...)
```
codes = np.loadtxt(camvid/'codes.txt', dtype=str); codes
```
And we define the following function that infers the mask filename from the image filename.
```
get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'
```
Then we can easily define a [`DataBunch`](/basic_data.html#DataBunch) using the data block API. Here we need to use `tfm_y=True` in the transform call because we need the same transforms to be applied to the target mask as were applied to the image. Side note: for further control over which transformations are used on the target, each transformation has a `use_on_y` parameter.
```
data = (SegmentationItemList.from_folder(path_img)
#Where to find the data? -> in path_img and its subfolders
.split_by_rand_pct()
#How to split in train/valid? -> randomly with the default 20% in valid
.label_from_func(get_y_fn, classes=codes)
#How to label? -> use the label function on the file name of the data
.transform(get_transforms(), tfm_y=True, size=128)
#Data augmentation? -> use tfms with a size of 128, also transform the label images
.databunch())
#Finally -> use the defaults for conversion to databunch
data.show_batch(rows=2, figsize=(7,5))
```
Another example for object detection. We use our tiny sample of the [COCO dataset](http://cocodataset.org/#home) here. There is a helper function in the library that reads the annotation file and returns the list of images names with the list of labelled bboxes associated to it. We convert it to a dictionary that maps image names with their bboxes and then write the function that will give us the target for each image filename.
```
coco = untar_data(URLs.COCO_TINY)
images, lbl_bbox = get_annotations(coco/'train.json')
img2bbox = dict(zip(images, lbl_bbox))
get_y_func = lambda o:img2bbox[o.name]
```
The following code is very similar to what we saw before. The only new addition is the use of a special function to collate the samples in batches. This comes from the fact that our images may have multiple bounding boxes, so we need to pad them to the largest number of bounding boxes.
```
data = (ObjectItemList.from_folder(coco)
#Where are the images? -> in coco and its subfolders
.split_by_rand_pct()
#How to split in train/valid? -> randomly with the default 20% in valid
.label_from_func(get_y_func)
#How to find the labels? -> use get_y_func on the file name of the data
.transform(get_transforms(), tfm_y=True)
#Data augmentation? -> Standard transforms; also transform the label images
.databunch(bs=16, collate_fn=bb_pad_collate))
#Finally we convert to a DataBunch, use a batch size of 16,
# and we use bb_pad_collate to collate the data into a mini-batch
data.show_batch(rows=2, ds_type=DatasetType.Valid, figsize=(6,6))
```
But vision isn't the only application where the data block API works. It can also be used for text and tabular data. With our sample of the IMDB dataset (labelled texts in a csv file), here is how to get the data together for a language model.
```
from fastai.text import *
imdb = untar_data(URLs.IMDB_SAMPLE)
data_lm = (TextList
.from_csv(imdb, 'texts.csv', cols='text')
#Where are the text? Column 'text' of texts.csv
.split_by_rand_pct()
#How to split it? Randomly with the default 20% in valid
.label_for_lm()
#Label it for a language model
.databunch())
#Finally we convert to a DataBunch
data_lm.show_batch()
```
For a classification problem, we just have to change the way labeling is done. Here we use the csv column `label`.
```
data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text')
.split_from_df(col='is_valid')
.label_from_df(cols='label')
.databunch())
data_clas.show_batch()
```
Lastly, for tabular data, we just have to pass the name of our categorical and continuous variables as an extra argument. We also add some [`PreProcessor`](/data_block.html#PreProcessor)s that are going to be applied to our data once the splitting and labelling is done.
```
from fastai.tabular import *
adult = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(adult/'adult.csv')
dep_var = 'salary'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
cont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss', 'fnlwgt', 'capital-gain']
procs = [FillMissing, Categorify, Normalize]
data = (TabularList.from_df(df, path=adult, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx=range(800,1000))
.label_from_df(cols=dep_var)
.databunch())
data.show_batch()
```
## Step 1: Provide inputs
The basic class to get your inputs is the following one. It's also the same class that will contain all of your labels (hence the name [`ItemList`](/data_block.html#ItemList)).
```
show_doc(ItemList, title_level=3)
```
This class regroups the inputs for our model in `items` and saves a `path` attribute which is where it will look for any files (image files, csv file with labels...). `label_cls` will be called to create the labels from the result of the label function, `inner_df` is an underlying dataframe, and `processor` is to be applied to the inputs after the splitting and labeling.
It has multiple subclasses depending on the type of data you're handling. Here is a quick list:
- [`CategoryList`](/data_block.html#CategoryList) for labels in classification
- [`MultiCategoryList`](/data_block.html#MultiCategoryList) for labels in a multi classification problem
- [`FloatList`](/data_block.html#FloatList) for float labels in a regression problem
- [`ImageList`](/vision.data.html#ImageList) for data that are images
- [`SegmentationItemList`](/vision.data.html#SegmentationItemList) like [`ImageList`](/vision.data.html#ImageList) but will default labels to [`SegmentationLabelList`](/vision.data.html#SegmentationLabelList)
- [`SegmentationLabelList`](/vision.data.html#SegmentationLabelList) for segmentation masks
- [`ObjectItemList`](/vision.data.html#ObjectItemList) like [`ImageList`](/vision.data.html#ImageList) but will default labels to `ObjectLabelList`
- `ObjectLabelList` for object detection
- [`PointsItemList`](/vision.data.html#PointsItemList) for points (of the type [`ImagePoints`](/vision.image.html#ImagePoints))
- [`ImageImageList`](/vision.data.html#ImageImageList) for image to image tasks
- [`TextList`](/text.data.html#TextList) for text data
- [`TextList`](/text.data.html#TextList) for text data stored in files
- [`TabularList`](/tabular.data.html#TabularList) for tabular data
- [`CollabList`](/collab.html#CollabList) for collaborative filtering
We can get a little glimpse of how [`ItemList`](/data_block.html#ItemList)'s basic attributes and methods behave with the following code examples.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
il_data = ItemList.from_folder(path_data, extensions=['.csv'])
il_data
```
Here is how to access the path of [`ItemList`](/data_block.html#ItemList) and the actual `items` (here files) in the path.
```
il_data.path
il_data.items
```
`len(il_data)` gives you the count of files inside `il_data` and you can access individual items using index.
```
len(il_data)
```
[`ItemList`](/data_block.html#ItemList) returns a single item with a single index, but returns an [`ItemList`](/data_block.html#ItemList) if given a list of indexes.
```
il_data[1]
il_data[:1]
```
With `il_data.add` we can concatenate another [`ItemList`](/data_block.html#ItemList) object in place.
```
il_data.add(il_data); il_data
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
itemlist = ItemList.from_folder(path_data/'test')
itemlist
```
As we can see, the files are not necessarily returned in alphanumeric order by default. In the above: 1503.png, ... 617.png, 585.png ...
This is OK when you're always using the same machine, as the same dataset should return in the same order. But when building a datablock on one machine (say GCP) and then porting the same code to a different machine (say your laptop) that same dataset and code might return the files in a different order.
Since all random operations use the loaded order of the dataset as the starting point, you will not be able to replicate any random operations, say randomly splitting the data into 80% train, and 20% validation, even while correctly seeding.
The solution is to use `presort=True` in the `.from_folder()` method. As can be seen below, with that argument turned on, the files are returned in ascending order, and this behavior will match across machines and across platforms. Now you can reproduce any random operation you perform on the loaded data.
```
itemlist = ItemList.from_folder(path_data/'test', presort=True)
itemlist
```
How is the output above generated?
Behind the scenes, executing `itemlist` calls [`ItemList.__repr__`](/data_block.html#ItemList.__repr__), which basically prints out `itemlist[0]` to `itemlist[4]`.
```
itemlist[0]
```
`itemlist[0]` basically calls `itemlist.get(0)`, which returns `itemlist.items[0]`. That's why we see the output above.
Once you have selected the class that is suitable, you can instantiate it with one of the following factory methods:
```
show_doc(ItemList.from_folder)
path = untar_data(URLs.MNIST_TINY)
path.ls()
ImageList.from_folder(path)
```
`path` is your root data folder. In the `path` directory you have _train_ and _valid_ folders which would contain your images. For the below example, _train_ folder contains two folders/classes _cat_ and _dog_.
<img src="imgs/from_folder.png" alt="from_folder">
```
show_doc(ItemList.from_df)
```
The dataframe has 2 columns. The first column is the path to the image and the second column contains the label id for that image. In case you have multiple labels (i.e. more than one label for a single image), you will have a space-separated string in the labels column (the delimiter is determined by the `label_delim` argument of `label_from_df`).
`from_df` and `from_csv` can be used in a more general way. In case you are not able to figure out how to get your ImageList, it is very easy to make a csv file in the above format.
How to set `path`? `path` refers to your root data directory. So the paths in your csv file should be relative to `path` and not absolute paths. In the below example, in _labels.csv_ the paths to the images are __path + train/3/7463.png__
```
path = untar_data(URLs.MNIST_SAMPLE)
path.ls()
df = pd.read_csv(path/'labels.csv')
df.head()
ImageList.from_df(df, path)
show_doc(ItemList.from_csv)
path = untar_data(URLs.MNIST_SAMPLE)
path.ls()
ImageList.from_csv(path, 'labels.csv')
```
### Optional step: filter your data
The factory method may have grabbed too many items. For instance, if you were searching sub folders with the `from_folder` method, you may have gotten files you don't want. To remove those, you can use one of the following methods.
```
show_doc(ItemList.filter_by_func)
path = untar_data(URLs.MNIST_SAMPLE)
df = pd.read_csv(path/'labels.csv')
df.head()
```
Suppose that you only want to keep images with a suffix ".png". Well, this method will do magic for you.
```
Path(df.name[0]).suffix
ImageList.from_df(df, path).filter_by_func(lambda fname: Path(fname).suffix == '.png')
show_doc(ItemList.filter_by_folder)
show_doc(ItemList.filter_by_rand)
path = untar_data(URLs.MNIST_SAMPLE)
ImageList.from_folder(path).filter_by_rand(0.5)
```
Contrast the number of items with the list created without the filter.
```
ImageList.from_folder(path)
show_doc(ItemList.to_text)
path = untar_data(URLs.MNIST_SAMPLE)
pd.read_csv(path/'labels.csv').head()
file_name = "item_list.txt"
ImageList.from_folder(path).to_text(file_name)
! cat {path/file_name} | head
show_doc(ItemList.use_partial_data)
path = untar_data(URLs.MNIST_SAMPLE)
ImageList.from_folder(path).use_partial_data(0.5)
```
Contrast the number of items with the list created without the filter.
```
ImageList.from_folder(path)
```
### Writing your own [`ItemList`](/data_block.html#ItemList)
First check if you can't easily customize one of the existing subclass by:
- subclassing an existing one and replacing the `get` method (or the `open` method if you're dealing with images)
- applying a custom `processor` (see step 4)
- changing the default `label_cls` for the label creation
- adding a default [`PreProcessor`](/data_block.html#PreProcessor) with the `_processor` class variable
If this isn't the case and you really need to write your own class, there is a [full tutorial](/tutorial.itemlist) that explains how to proceed.
```
show_doc(ItemList.analyze_pred)
show_doc(ItemList.get)
```
We will get a glimpse of how `get` works with the following demo.
```
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il_data_base = ItemList.from_folder(path=path_data, extensions=['.png'], include=['test'])
il_data_base
```
`get` is used implicitly within `il_data_base[15]`. `il_data_base.get(15)` gives the same result here, because its default is to return just that.
```
il_data_base[15]
```
While creating your custom [`ItemList`](/data_block.html#ItemList) however, you can override this function to do some things to your item (like opening an image).
```
il_data_image = ImageList.from_folder(path=path_data, extensions=['.png'], include=['test'])
il_data_image
```
Again, `get` is normally used implicitly within `il_data_image[15]`.
```
il_data_image[15]
```
The reason an image is displayed instead of a FilePath object is that [`ImageList.get`](/vision.data.html#ImageList.get) overrides [`ItemList.get`](/data_block.html#ItemList.get) and uses [`ImageList.open`](/vision.data.html#ImageList.open) to open and display the image.
```
show_doc(ItemList.new)
```
You'll rarely need to subclass this; just don't forget to add to `self.copy_new` the names of the arguments that need to be copied each time `new` is called in `__init__`.
We will get a feel of how `new` works with the following examples.
```
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
itemlist1 = ItemList.from_folder(path=path_data/'valid', extensions=['.png'])
itemlist1
```
As you will see below, `copy_new` allows us to borrow any argument and its value from `itemlist1`, and `itemlist1.new(itemlist1.items)` allows us to use `items` and arguments inside `copy_new` to create another [`ItemList`](/data_block.html#ItemList) by calling [`ItemList.__init__`](/data_block.html#ItemList.__init__).
```
itemlist1.copy_new == ['x', 'label_cls', 'path']
((itemlist1.x == itemlist1.label_cls == itemlist1.inner_df == None)
and (itemlist1.path == Path('/Users/Natsume/.fastai/data/mnist_tiny/valid')))
```
You can select any argument from [`ItemList.__init__`](/data_block.html#ItemList.__init__)'s signature and change its value.
```
itemlist1.copy_new = ['x', 'label_cls', 'path', 'inner_df']
itemlist1.x = itemlist1.label_cls = itemlist1.path = itemlist1.inner_df = 'test'
itemlist2 = itemlist1.new(items=itemlist1.items)
(itemlist2.inner_df == itemlist2.x == itemlist2.label_cls == 'test'
and itemlist2.path == Path('test'))
show_doc(ItemList.reconstruct)
```
## Step 2: Split the data between the training and the validation set
This step is normally straightforward, you just have to pick one of the following functions depending on what you need.
```
show_doc(ItemList.split_none)
show_doc(ItemList.split_by_rand_pct)
show_doc(ItemList.split_subsets)
```
This function is handy if you want to work with subsets of specific sizes, e.g., you want to use 20% of the data for the validation dataset, but you only want to train on a small subset of the rest of the data: `split_subsets(train_size=0.08, valid_size=0.2)`.
```
show_doc(ItemList.split_by_files)
show_doc(ItemList.split_by_fname_file)
```
Internally makes a call to `split_by_files`. `fname` contains your image file names like 0001.png.
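For example, a hypothetical usage (assuming `il` is an [`ItemList`](/data_block.html#ItemList) and `valid.txt` is a made-up file listing one such filename per line, relative to `path`):
```python
# hypothetical: valid.txt contains lines like 0001.png, 0002.png, ...
sd = il.split_by_fname_file('valid.txt')
```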
```
show_doc(ItemList.split_by_folder)
jekyll_note("This method looks at the folder immediately after `self.path` for `valid` and `train`.")
```
Basically, `split_by_folder` takes in two folder names ('train' and 'valid' in the following example) to split the large [`ImageList`](/vision.data.html#ImageList) `il` into two smaller [`ImageList`](/vision.data.html#ImageList)s, one for the training set and the other for the validation set. Both [`ImageList`](/vision.data.html#ImageList)s are attached to an [`ItemLists`](/data_block.html#ItemLists) object, which is the final output of `split_by_folder`.
```
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il = ItemList.from_folder(path=path_data); il
sd = il.split_by_folder(train='train', valid='valid'); sd
```
Behind the scenes, `split_by_folder` uses `_get_by_folder(name)` to turn the 'train' and 'valid' folders into two lists of indices, then passes them to `split_by_idxs` to split `il` into two [`ImageList`](/vision.data.html#ImageList)s, which are finally attached to an [`ItemLists`](/data_block.html#ItemLists).
```
train_idx = il._get_by_folder(name='train')
train_idx[:5], train_idx[-5:], len(train_idx)
valid_idx = il._get_by_folder(name='valid')
valid_idx[:5], valid_idx[-5:],len(valid_idx)
```
By the way, `_get_by_folder(name)` works as follows: it goes through the entire `il.items`, and whenever an item belongs to the named folder (e.g., 'train') it adds that item's index to a list. The folder `name` is the only input, and the output is that list of indices.
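In pseudo-code terms, the idea is roughly the following (a sketch, not the actual fastai source; the function name is made up):
```python
from pathlib import Path

def get_idxs_by_folder(il, name):
    # keep the index of every item whose folder directly under il.path is `name`
    n_root = len(Path(il.path).parts)
    return [i for i, item in enumerate(il.items) if Path(item).parts[n_root] == name]
```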
```
show_doc(ItemList.split_by_idx)
path = untar_data(URLs.MNIST_SAMPLE)
df = pd.read_csv(path/'labels.csv')
df.head()
```
You can pass a list of the indices that you want to put in the validation set, such as [1, 3, 10], or a contiguous list like `list(range(1000))`.
```
data = (ImageList.from_df(df, path)
.split_by_idx(list(range(1000))))
data
show_doc(ItemList.split_by_idxs)
```
Behind the scenes, `split_by_idxs` turns two index lists (`train_idx` and `valid_idx`) into two [`ImageList`](/vision.data.html#ImageList)s, then passes them on to `split_by_list` to split `il` into two [`ImageList`](/vision.data.html#ImageList)s and attach them to an [`ItemLists`](/data_block.html#ItemLists).
```
sd = il.split_by_idxs(train_idx=train_idx, valid_idx=valid_idx); sd
show_doc(ItemList.split_by_list)
```
`split_by_list` takes in two [`ImageList`](/vision.data.html#ImageList)s, which in the case below are `il[train_idx]` and `il[valid_idx]`, and passes them to `_split` ([`ItemLists`](/data_block.html#ItemLists)) to initialize an [`ItemLists`](/data_block.html#ItemLists) object, which basically takes in the training, validation and (optionally) testing [`ImageList`](/vision.data.html#ImageList)s as its properties.
```
sd = il.split_by_list(train=il[train_idx], valid=il[valid_idx]); sd
```
This is more of an internal method; you should use `split_by_files` if you want to pass a list of filenames for the validation set.
```
show_doc(ItemList.split_by_valid_func)
show_doc(ItemList.split_from_df)
```
To use this function, you need a boolean column (defaulting to the third column of the dataframe). The examples put in the validation set correspond to the indices with a `True` value in that column.
```
path = untar_data(URLs.MNIST_SAMPLE)
df = pd.read_csv(path/'labels.csv')
# Create a new column for is_valid
df['is_valid'] = [True]*(df.shape[0]//2) + [False]*(df.shape[0]//2)
# Randomly shuffle dataframe
df = df.reindex(np.random.permutation(df.index))
print(df.shape)
df.head()
data = (ImageList.from_df(df, path)
.split_from_df())
data
jekyll_warn("This method assumes the data has been created from a csv file or a dataframe.")
```
## Step 3: Label the inputs
To label your inputs, use one of the following functions. Note that even if it's not in the documented arguments, you can always pass a `label_cls` that will be used to create those labels (the default is the one from your input [`ItemList`](/data_block.html#ItemList), and if there is none, it will go to [`CategoryList`](/data_block.html#CategoryList), [`MultiCategoryList`](/data_block.html#MultiCategoryList) or [`FloatList`](/data_block.html#FloatList) depending on the type of the labels). This is implemented in the following function:
```
show_doc(ItemList.get_label_cls)
```
Behind the scenes, [`ItemList.get_label_cls`](/data_block.html#ItemList.get_label_cls) basically selects a label class according to the item type of `labels`, where `labels` can be any of `Collection`, `pandas.core.frame.DataFrame` or `pandas.core.series.Series`. If the list elements are of type string or integer, `get_label_cls` will output [`CategoryList`](/data_block.html#CategoryList); if they are of type float, it will output [`FloatList`](/data_block.html#FloatList); if they are of type Collection, it will output [`MultiCategoryList`](/data_block.html#MultiCategoryList).
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid'); sd
labels = ['7', '3']
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [7, 3]
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [7.0, 3.0]
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [[7, 3],]
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [['7', '3'],]
label_cls = sd.train.get_label_cls(labels); label_cls
```
If no `label_cls` argument is passed, the correct labeling type can usually be inferred from the data (for classification or regression). If you have multiple regression targets (e.g. predict 5 different numbers from a single image/text), be aware that arrays of floats are by default considered to be targets for one-hot encoded classification. If your task is regression, be sure to pass `label_cls=FloatList` so that learners created from your databunch initialize correctly.
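For example, a hypothetical multi-target regression setup could look like the sketch below (the csv name and the `target1`/`target2` columns are made up for illustration):
```python
# purely illustrative: a csv under `path` with float target columns
data = (ImageList.from_csv(path, 'targets.csv', folder='train', suffix='.png')
        .split_by_rand_pct()
        .label_from_df(cols=['target1', 'target2'], label_cls=FloatList)
        .databunch())
```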
The first example in these docs created labels as follows:
```
path = untar_data(URLs.MNIST_TINY)
ll = ImageList.from_folder(path).split_by_folder().label_from_folder().train
```
If you want to save the data necessary to recreate your [`LabelList`](/data_block.html#LabelList) (not including saving the actual image/text/etc files), you can use `to_df` or `to_csv`:
```python
ll.train.to_csv('tmp.csv')
```
Or just grab a `pd.DataFrame` directly:
```
ll.to_df().head()
show_doc(ItemList.label_empty)
show_doc(ItemList.label_from_df)
jekyll_warn("This method only works with data objects created with either `from_csv` or `from_df` methods.")
show_doc(ItemList.label_const)
show_doc(ItemList.label_from_folder)
jekyll_note("This method looks at the last subfolder in the path to determine the classes.")
```
Behind the scenes, when an [`ItemList`](/data_block.html#ItemList) calls `label_from_folder`, it creates a lambda function that returns the name of the folder a file Path object immediately belongs to, and then calls `label_from_func` with that lambda function as input.
In practice, `label_from_folder` is mostly used with [`ItemLists`](/data_block.html#ItemLists) rather than [`ItemList`](/data_block.html#ItemList) for simplicity and efficiency; for details see the `label_from_folder` example on [ItemLists](). Even when you just want a training set [`ItemList`](/data_block.html#ItemList), you still need to call `split_none` to create an [`ItemLists`](/data_block.html#ItemLists) and then do the labeling with `label_from_folder`, as in the example shown below.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
sd_train = ImageList.from_folder(path_data/'train').split_none()
ll_train = sd_train.label_from_folder(); ll_train
show_doc(ItemList.label_from_func)
```
`label_from_func` applies the input `func` to every item of an [`ItemList`](/data_block.html#ItemList), collects the function outputs into a list, and then passes that list to [`ItemList._label_from_list`](/data_block.html#ItemList._label_from_list). Below is a simple example of using `label_from_func`.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid');sd
func=lambda o: (o.parts if isinstance(o, Path) else o.split(os.path.sep))[-2]
```
The lambda function above returns the immediate folder name of a file Path object.
```
ll = sd.label_from_func(func); ll
show_doc(ItemList.label_from_re)
show_doc(CategoryList, title_level=3)
```
[`ItemList`](/data_block.html#ItemList) suitable for storing labels in `items` belonging to `classes`. If `None` is passed, `classes` will be determined from the unique labels. `processor` will default to [`CategoryProcessor`](/data_block.html#CategoryProcessor).
[`CategoryList`](/data_block.html#CategoryList) uses `labels` to create an [`ItemList`](/data_block.html#ItemList) for dealing with categorical labels. Behind the scenes, [`CategoryList`](/data_block.html#CategoryList) is a subclass of [`CategoryListBase`](/data_block.html#CategoryListBase) which is a subclass of [`ItemList`](/data_block.html#ItemList). [`CategoryList`](/data_block.html#CategoryList) inherits from [`CategoryListBase`](/data_block.html#CategoryListBase) the properties such as `classes` (default as `None`), `filter_missing_y` (default as `True`), and has its own unique property `loss_func` (default as `CrossEntropyFlat()`), and its own class attribute `_processor` (default as [`CategoryProcessor`](/data_block.html#CategoryProcessor)).
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
ll = ImageList.from_folder(path_data).split_by_folder('train', 'valid').label_from_folder()
ll.train.y.items, ll.train.y.classes, ll.train.y[0]
cl = CategoryList(ll.train.y.items, ll.train.y.classes); cl
```
For the behavior of printing a [`CategoryList`](/data_block.html#CategoryList) object or accessing an element by index, see [`CategoryList.get`](/data_block.html#CategoryList.get) below.
Behind the scenes, [`CategoryList.get`](/data_block.html#CategoryList.get) is called implicitly when printing the [`CategoryList`](/data_block.html#CategoryList) object or evaluating `cl[idx]`. According to the source of [`CategoryList.get`](/data_block.html#CategoryList.get), each `item` is used to get its own `class`. When `classes` is a list of strings, the elements of `items` are used as indices into that list, so they must be integers in the range from 0 to `len(classes)-1`; if `classes` is a dictionary, the elements of `items` are used as keys, so they can be strings too. See the examples below for details.
```
from fastai.vision import *
items = np.array([0, 1, 2, 1, 0])
cl = CategoryList(items, classes=['3', '7', '9']); cl
items = np.array(['3', '7', '9', '7', '3'])
classes = {'3':3, '7':7, '9':9}
cl = CategoryList(items, classes); cl
show_doc(MultiCategoryList, title_level=3)
```
It will store a list of labels in `items` belonging to `classes`. If `None` is passed, `classes` will be determined from the unique labels. `sep` is used to split the content of `items` into a list of tags.
If `one_hot=True`, the items contain the labels one-hot encoded. In this case, it is mandatory to pass a list of `classes` (since they can no longer be inferred from the labels).
```
show_doc(FloatList, title_level=3)
show_doc(EmptyLabelList, title_level=3)
```
## Invisible step: preprocessing
This isn't seen here in the API, but if you passed a `processor` (or a list of them) in your initial [`ItemList`](/data_block.html#ItemList) during step 1, it will be applied here. If you didn't pass any processor, a list of them might still be created depending on what is in the `_processor` variable of your class of items (this can be a list of [`PreProcessor`](/data_block.html#PreProcessor) classes).
A processor is a transformation that is applied to all the inputs once at initialization, with a state computed on the training set that is then applied without modification on the validation set (and maybe the test set). For instance, it can be processing texts to tokenize then numericalize them. In that case we want the validation set to be numericalized with exactly the same vocabulary as the training set.
Another example is in tabular data, where we fill missing values with (for instance) the median computed on the training set. That statistic is stored in the inner state of the [`PreProcessor`](/data_block.html#PreProcessor) and applied on the validation set.
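As a rough sketch of this stateful train-then-apply pattern (the class below is made up, assumes numeric items, and is only meant to illustrate the idea):
```python
from fastai.vision import *

class FillMedianProcessor(PreProcessor):
    # Hypothetical processor: computes the median once, on the first dataset it
    # processes (the training set), then reuses that value for validation/test.
    def process(self, ds):
        if not hasattr(self, 'median'):
            self.median = np.nanmedian(np.array(ds.items, dtype=float))
        ds.items = [self.process_one(item) for item in ds.items]

    def process_one(self, item):
        # replace missing values with the median computed on the training set
        return self.median if item is None or np.isnan(item) else item
```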
This is the generic class for all processors.
```
show_doc(PreProcessor, title_level=3)
show_doc(PreProcessor.process_one)
```
Process one `item`. This method needs to be written in any subclass.
```
show_doc(PreProcessor.process)
```
`ds`: an object of [`ItemList`](/data_block.html#ItemList)
Process a dataset. This defaults to applying `process_one` to every `item` of `ds`.
```
show_doc(CategoryProcessor, title_level=3)
show_doc(CategoryProcessor.generate_classes)
show_doc(CategoryProcessor.process)
```
`ds` is an object of [`CategoryList`](/data_block.html#CategoryList).
It basically generates a list of unique labels (assigned to `ds.classes`) and a dictionary mapping `classes` to indexes (assigned to `ds.c2i`).
It is an internal function only called to apply processors to training, validation and testing datasets after the labeling step.
```
show_doc(MultiCategoryProcessor, title_level=3)
show_doc(MultiCategoryProcessor.generate_classes)
```
## Optional steps
### Add transforms
Transforms differ from processors in the sense that they are applied on the fly when we grab one item. They may also change each time we ask for the same item, in the case of random transforms.
```
show_doc(LabelLists.transform)
```
This is primarily for the vision application. The `kwargs` arguments are the ones expected by the type of transforms you pass. `tfm_y` is among them and, if set to `True`, the transforms will be applied to both the input and the target.
For examples see: [vision.transforms](vision.transform.html).
### Add a test set
To add a test set, you can use one of the two following methods.
```
show_doc(LabelLists.add_test)
jekyll_note("Here `items` can be an `ItemList` or a collection.")
show_doc(LabelLists.add_test_folder)
jekyll_warn("In fastai the test set is unlabeled! No labels will be collected even if they are available.")
```
Instead, either the passed `label` argument or an empty label will be used for all entries of this dataset (this is required by the internal pipeline of fastai).
In the `fastai` framework `test` datasets have no labels - this is the unknown data to be predicted. If you want to validate your model on a `test` dataset with labels, you probably need to use it as a validation set, as in:
```
data_test = (ImageList.from_folder(path)
.split_by_folder(train='train', valid='test')
.label_from_folder()
...)
```
Another approach is to use a normal validation set during training and then, once training is over, validate on the labeled test set by treating it as a validation set:
```
tfms = []
path = Path('data').resolve()
data = (ImageList.from_folder(path)
.split_by_pct()
.label_from_folder()
.transform(tfms)
.databunch()
.normalize() )
learn = cnn_learner(data, models.resnet50, metrics=accuracy)
learn.fit_one_cycle(5,1e-2)
# now replace the validation dataset entry with the test dataset as a new validation dataset:
# everything is exactly the same, except replacing `split_by_pct` w/ `split_by_folder`
# (or perhaps you were already using the latter, so simply switch to valid='test')
data_test = (ImageList.from_folder(path)
.split_by_folder(train='train', valid='test')
.label_from_folder()
.transform(tfms)
.databunch()
.normalize()
)
learn.validate(data_test.valid_dl)
```
Of course, your data block can be totally different; this is just an example.
## Step 4: convert to a [`DataBunch`](/basic_data.html#DataBunch)
This last step is usually pretty straightforward. You just have to include all the arguments we pass to [`DataBunch.create`](/basic_data.html#DataBunch.create) (`bs`, `num_workers`, `collate_fn`). The class called to create a [`DataBunch`](/basic_data.html#DataBunch) is set in the `_bunch` attribute of the inputs of the training set if you need to modify it. Normally, the various subclasses we showed before handle that for you.
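For instance (the batch size and worker count below are arbitrary, and the sketch assumes `path` points at a folder dataset of uniformly sized images), these arguments are simply forwarded through the final `databunch()` call:
```python
# arbitrary values, just to show where DataBunch.create arguments go
data = (ImageList.from_folder(path)
        .split_by_folder()
        .label_from_folder()
        .databunch(bs=64, num_workers=2))
```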
```
show_doc(LabelLists.databunch)
```
## Inner classes
```
show_doc(LabelList, title_level=3)
```
Optionally apply `tfms` to `y` if `tfm_y` is `True`.
Behind the scenes, it takes inputs [`ItemList`](/data_block.html#ItemList) and labels [`ItemList`](/data_block.html#ItemList) as its properties `x` and `y`, sets property `item` to `None`, and uses [`LabelList.transform`](/data_block.html#LabelList.transform) to apply a list of transforms `TfmList` to `x` and `y` if `tfm_y` is set `True`.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
ll = ImageList.from_folder(path_data).split_by_folder('train', 'valid').label_from_folder()
ll.train.x, ll.train.y
LabelList(x=ll.train.x, y=ll.train.y)
show_doc(LabelList.export)
show_doc(LabelList.transform_y)
show_doc(LabelList.get_state)
show_doc(LabelList.load_empty)
show_doc(LabelList.load_state)
show_doc(LabelList.process)
```
Behind the scenes, [`LabelList.process`](/data_block.html#LabelList.process) does three things: 1. asks the labels `y` to be processed by `yp` with `y.process(yp)`; 2. if `y.filter_missing_y` is `True`, removes the missing data samples from `x` and `y`; 3. asks the inputs `x` to be processed by `xp` with `x.process(xp)`.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid')
sd.train = sd.train.label_from_folder(from_item_lists=True)
sd.valid = sd.valid.label_from_folder(from_item_lists=True)
sd.__class__ = LabelLists
xp,yp = sd.get_processors()
xp,yp
sd.train.process(xp, yp)
show_doc(LabelList.set_item)
show_doc(LabelList.to_df)
show_doc(LabelList.to_csv)
show_doc(LabelList.transform)
show_doc(ItemLists, title_level=3)
```
It initializes an [`ItemLists`](/data_block.html#ItemLists) object, which basically brings in the training, validation and (optionally) testing [`ItemList`](/data_block.html#ItemList)s as its properties. It also offers helpful warning messages when the training or validation [`ItemList`](/data_block.html#ItemList) is empty.
See the following example for how to create an [`ItemLists`](/data_block.html#ItemLists) object.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il_train = ImageList.from_folder(path_data/'train')
il_valid = ImageList.from_folder(path_data/'valid')
il_test = ImageList.from_folder(path_data/'test')
ils = ItemLists(path=path_data, train=il_train, valid=il_valid); ils
ils.test = il_test; ils
```
However, we are most likely to see an [`ItemLists`](/data_block.html#ItemLists) right after a large [`ItemList`](/data_block.html#ItemList) is split into one by methods like [`ItemList.split_by_folder`](/data_block.html#ItemList.split_by_folder). Then, we can add labels to both the training and validation sets simply by calling `sd.label_from_folder()` (`sd` is an [`ItemLists`](/data_block.html#ItemLists); see the example below). Now, some of you may be surprised, because `label_from_folder` is a method of [`ItemList`](/data_block.html#ItemList), not [`ItemLists`](/data_block.html#ItemLists). Well, this is part of the magic of the fastai data_block API.
With the following example, we may understand a little better how to get labelling done by calling [`ItemLists.__getattr__`](/data_block.html#ItemLists.__getattr__) with [`ItemList.label_from_folder`](/data_block.html#ItemList.label_from_folder).
```
il = ImageList.from_folder(path_data); il
```
An [`ItemList`](/data_block.html#ItemList) or its subclass object must do a split to turn itself into an [`ItemLists`](/data_block.html#ItemLists) before doing labeling to become a [`LabelLists`](/data_block.html#LabelLists) object.
```
sd = il.split_by_folder(train='train', valid='valid'); sd
ll = sd.label_from_folder(); ll
```
Even when there is just an [`ImageList`](/vision.data.html#ImageList) from a training set folder with no split needed, we still must call `split_none()` in order to create an [`ItemLists`](/data_block.html#ItemLists); only then can we call `ItemLists.label_from_folder()`.
```
il_train = ImageList.from_folder(path_data/'train')
sd_train = il_train.split_none(); sd_train
ll_valid_empty = sd_train.label_from_folder(); ll_valid_empty
```
So practically, although `label_from_folder` is not an [`ItemLists`](/data_block.html#ItemLists) method, we can call `ItemLists.label_from_folder()` to label the training, validation and test [`ItemList`](/data_block.html#ItemList)s in one go.
Behind the scenes, `ItemLists.label_from_folder()` actually calls `ItemLists.__getattr__('label_from_folder')`, in which the training, validation and (if present) test [`ItemList`](/data_block.html#ItemList)s each call `label_from_folder`; it then turns the [`ItemLists`](/data_block.html#ItemLists) into a [`LabelLists`](/data_block.html#LabelLists) and finally calls [`LabelLists.process`](/data_block.html#LabelLists.process).
You can directly use `LabelLists.__getattr__` to do labelling as below.
```
ld_inner = sd.__getattr__('label_from_folder'); ld_inner()
show_doc(ItemLists.label_from_lists)
show_doc(ItemLists.transform)
show_doc(ItemLists.transform_y)
show_doc(LabelLists, title_level=3)
```
Creating a [`LabelLists`](/data_block.html#LabelLists) object is exactly the same way as creating an [`ItemLists`](/data_block.html#ItemLists) object, because its base class is [`ItemLists`](/data_block.html#ItemLists) and does not overwrite [`ItemLists.__init__`](/data_block.html#ItemLists.__init__). The example below shows how to build a [`LabelLists`](/data_block.html#LabelLists) object.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il_train = ImageList.from_folder(path_data/'train')
il_valid = ImageList.from_folder(path_data/'valid')
ll_test = LabelLists(path_data, il_train, il_valid);
ll_test.test = il_valid = ImageList.from_folder(path_data/'test')
ll_test
show_doc(LabelLists.get_processors)
```
Behind the scenes, `LabelLists.get_processors()` first puts `train.x._processor` classes and `train.y._processor` classes into separate lists, and then instantiates those processors and puts them into `xp` and `yp`.
```
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid')
sd.train = sd.train.label_from_folder(from_item_lists=True)
sd.valid = sd.valid.label_from_folder(from_item_lists=True)
sd.__class__ = LabelLists
xp,yp = sd.get_processors()
xp,yp
show_doc(LabelLists.load_empty)
show_doc(LabelLists.load_state)
show_doc(LabelLists.process)
show_doc(ItemList.process)
```
`processor` is one or more `PreProcessor` objects.
Behind the scenes, we put all of `processor` into a list and apply them all to an object of [`ItemList`](/data_block.html#ItemList) or its subclasses.
## Helper functions
```
show_doc(get_files)
```
To be more precise, this function returns a list of FilePath objects for the files in `path` whose suffix is in `extensions`; hidden folders and files are ignored. If `recurse=True`, files in subfolders are included as well; `include` is used to restrict the search to particular subfolders.
Inside [`get_files`](/data_block.html#get_files), there is [`_get_files`](/data_block.html#_get_files) which turns all filenames inside `f` from directory `parent/p` into a list of FilePath objects. All filenames must have a suffix in `extensions`. All hidden files are ignored.
```
path_data = untar_data(URLs.MNIST_TINY)
path_data.ls()
```
With `recurse=False`, no subfolder files are made available.
```
list_FilePath_noRecurse = get_files(path_data)
list_FilePath_noRecurse
```
With `recurse=True`, all subfolder files are made available, except hidden files.
```
list_FilePath_recurse = get_files(path_data, recurse=True)
list_FilePath_recurse[:3]
list_FilePath_recurse[-2:]
```
With `extensions=['.csv']`, only files with the suffix of `.csv` are made available.
```
list_FilePath_recurse_csv = get_files(path_data, recurse=True, extensions=['.csv'])
list_FilePath_recurse_csv
```
With `include=['test']`, only files in `path_data` and its subfolder `test` are made available.
```
list_FilePath_include = get_files(path_data, recurse=True, extensions=['.png','.jpg','.jpeg'],
include=['test'])
list_FilePath_include[:3]
list_FilePath_include[-3:]
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(CategoryList.new)
show_doc(LabelList.new)
show_doc(CategoryList.get)
show_doc(LabelList.predict)
show_doc(ItemList.new)
show_doc(ItemList.process_one)
show_doc(MultiCategoryProcessor.process_one)
show_doc(FloatList.get)
show_doc(CategoryProcessor.process_one)
```
It basically converts `item`, which is a category name, to an index.
`classes`: a list of unique and sorted labels;
It creates the inner mapping from category name to index (stored in `c2i`) from the `classes`.
```
show_doc(CategoryProcessor.create_classes)
show_doc(MultiCategoryList.get)
show_doc(FloatList.new)
show_doc(FloatList.reconstruct)
show_doc(MultiCategoryList.analyze_pred)
show_doc(MultiCategoryList.reconstruct)
show_doc(CategoryList.reconstruct)
show_doc(CategoryList.analyze_pred)
show_doc(EmptyLabelList.reconstruct)
show_doc(EmptyLabelList.get)
show_doc(LabelList.databunch)
```
## New Methods - Please document or move to the undocumented section
```
show_doc(ItemList.add)
```
|
github_jupyter
|
from fastai.gen_doc.nbdoc import *
from fastai.basics import *
np.random.seed(42)
from fastai.vision import *
path = untar_data(URLs.MNIST_TINY)
tfms = get_transforms(do_flip=False)
path.ls()
(path/'train').ls()
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=64)
data = (ImageList.from_folder(path) #Where to find the data? -> in path and its subfolders
.split_by_folder() #How to split in train/valid? -> use the folders
.label_from_folder() #How to label? -> depending on the folder of the filenames
.add_test_folder() #Optionally add a test set (here default name is test)
.transform(tfms, size=64) #Data augmentation? -> use tfms with a size of 64
.databunch()) #Finally? -> use the defaults for conversion to ImageDataBunch
data.show_batch(3, figsize=(6,6), hide_axis=False)
planet = untar_data(URLs.PLANET_TINY)
planet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
pd.read_csv(planet/"labels.csv").head()
data = ImageDataBunch.from_csv(planet, folder='train', size=128, suffix='.jpg', label_delim = ' ', ds_tfms=planet_tfms)
planet.ls()
pd.read_csv(planet/"labels.csv").head()
data = (ImageList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg')
#Where to find the data? -> in planet 'train' folder
.split_by_rand_pct()
#How to split in train/valid? -> randomly with the default 20% in valid
.label_from_df(label_delim=' ')
#How to label? -> use the second column of the csv file and split the tags by ' '
.transform(planet_tfms, size=128)
#Data augmentation? -> use tfms with a size of 128
.databunch())
#Finally -> use the defaults for conversion to databunch
data.show_batch(rows=2, figsize=(9,7))
camvid = untar_data(URLs.CAMVID_TINY)
path_lbl = camvid/'labels'
path_img = camvid/'images'
codes = np.loadtxt(camvid/'codes.txt', dtype=str); codes
get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'
data = (SegmentationItemList.from_folder(path_img)
#Where to find the data? -> in path_img and its subfolders
.split_by_rand_pct()
#How to split in train/valid? -> randomly with the default 20% in valid
.label_from_func(get_y_fn, classes=codes)
#How to label? -> use the label function on the file name of the data
.transform(get_transforms(), tfm_y=True, size=128)
#Data augmentation? -> use tfms with a size of 128, also transform the label images
.databunch())
#Finally -> use the defaults for conversion to databunch
data.show_batch(rows=2, figsize=(7,5))
coco = untar_data(URLs.COCO_TINY)
images, lbl_bbox = get_annotations(coco/'train.json')
img2bbox = dict(zip(images, lbl_bbox))
get_y_func = lambda o:img2bbox[o.name]
data = (ObjectItemList.from_folder(coco)
#Where are the images? -> in coco and its subfolders
.split_by_rand_pct()
#How to split in train/valid? -> randomly with the default 20% in valid
.label_from_func(get_y_func)
#How to find the labels? -> use get_y_func on the file name of the data
.transform(get_transforms(), tfm_y=True)
#Data augmentation? -> Standard transforms; also transform the label images
.databunch(bs=16, collate_fn=bb_pad_collate))
#Finally we convert to a DataBunch, use a batch size of 16,
# and we use bb_pad_collate to collate the data into a mini-batch
data.show_batch(rows=2, ds_type=DatasetType.Valid, figsize=(6,6))
from fastai.text import *
imdb = untar_data(URLs.IMDB_SAMPLE)
data_lm = (TextList
.from_csv(imdb, 'texts.csv', cols='text')
#Where are the text? Column 'text' of texts.csv
.split_by_rand_pct()
#How to split it? Randomly with the default 20% in valid
.label_for_lm()
#Label it for a language model
.databunch())
#Finally we convert to a DataBunch
data_lm.show_batch()
data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text')
.split_from_df(col='is_valid')
.label_from_df(cols='label')
.databunch())
data_clas.show_batch()
from fastai.tabular import *
adult = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(adult/'adult.csv')
dep_var = 'salary'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
cont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss', 'fnlwgt', 'capital-gain']
procs = [FillMissing, Categorify, Normalize]
data = (TabularList.from_df(df, path=adult, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx=range(800,1000))
.label_from_df(cols=dep_var)
.databunch())
data.show_batch()
show_doc(ItemList, title_level=3)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
il_data = ItemList.from_folder(path_data, extensions=['.csv'])
il_data
il_data.path
il_data.items
len(il_data)
il_data[1]
il_data[:1]
il_data.add(il_data); il_data
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
itemlist = ItemList.from_folder(path_data/'test')
itemlist
itemlist = ItemList.from_folder(path_data/'test', presort=True)
itemlist
itemlist[0]
show_doc(ItemList.from_folder)
path = untar_data(URLs.MNIST_TINY)
path.ls()
ImageList.from_folder(path)
show_doc(ItemList.from_df)
path = untar_data(URLs.MNIST_SAMPLE)
path.ls()
df = pd.read_csv(path/'labels.csv')
df.head()
ImageList.from_df(df, path)
show_doc(ItemList.from_csv)
path = untar_data(URLs.MNIST_SAMPLE)
path.ls()
ImageList.from_csv(path, 'labels.csv')
show_doc(ItemList.filter_by_func)
path = untar_data(URLs.MNIST_SAMPLE)
df = pd.read_csv(path/'labels.csv')
df.head()
Path(df.name[0]).suffix
ImageList.from_df(df, path).filter_by_func(lambda fname: Path(fname).suffix == '.png')
show_doc(ItemList.filter_by_folder)
show_doc(ItemList.filter_by_rand)
path = untar_data(URLs.MNIST_SAMPLE)
ImageList.from_folder(path).filter_by_rand(0.5)
ImageList.from_folder(path)
show_doc(ItemList.to_text)
path = untar_data(URLs.MNIST_SAMPLE)
pd.read_csv(path/'labels.csv').head()
file_name = "item_list.txt"
ImageList.from_folder(path).to_text(file_name)
! cat {path/file_name} | head
show_doc(ItemList.use_partial_data)
path = untar_data(URLs.MNIST_SAMPLE)
ImageList.from_folder(path).use_partial_data(0.5)
ImageList.from_folder(path)
show_doc(ItemList.analyze_pred)
show_doc(ItemList.get)
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il_data_base = ItemList.from_folder(path=path_data, extensions=['.png'], include=['test'])
il_data_base
il_data_base[15]
il_data_image = ImageList.from_folder(path=path_data, extensions=['.png'], include=['test'])
il_data_image
il_data_image[15]
show_doc(ItemList.new)
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
itemlist1 = ItemList.from_folder(path=path_data/'valid', extensions=['.png'])
itemlist1
itemlist1.copy_new == ['x', 'label_cls', 'path']
((itemlist1.x == itemlist1.label_cls == itemlist1.inner_df == None)
and (itemlist1.path == Path('/Users/Natsume/.fastai/data/mnist_tiny/valid')))
itemlist1.copy_new = ['x', 'label_cls', 'path', 'inner_df']
itemlist1.x = itemlist1.label_cls = itemlist1.path = itemlist1.inner_df = 'test'
itemlist2 = itemlist1.new(items=itemlist1.items)
(itemlist2.inner_df == itemlist2.x == itemlist2.label_cls == 'test'
and itemlist2.path == Path('test'))
show_doc(ItemList.reconstruct)
show_doc(ItemList.split_none)
show_doc(ItemList.split_by_rand_pct)
show_doc(ItemList.split_subsets)
show_doc(ItemList.split_by_files)
show_doc(ItemList.split_by_fname_file)
show_doc(ItemList.split_by_folder)
jekyll_note("This method looks at the folder immediately after `self.path` for `valid` and `train`.")
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il = ItemList.from_folder(path=path_data); il
sd = il.split_by_folder(train='train', valid='valid'); sd
train_idx = il._get_by_folder(name='train')
train_idx[:5], train_idx[-5:], len(train_idx)
valid_idx = il._get_by_folder(name='valid')
valid_idx[:5], valid_idx[-5:],len(valid_idx)
show_doc(ItemList.split_by_idx)
path = untar_data(URLs.MNIST_SAMPLE)
df = pd.read_csv(path/'labels.csv')
df.head()
data = (ImageList.from_df(df, path)
.split_by_idx(list(range(1000))))
data
show_doc(ItemList.split_by_idxs)
sd = il.split_by_idxs(train_idx=train_idx, valid_idx=valid_idx); sd
show_doc(ItemList.split_by_list)
sd = il.split_by_list(train=il[train_idx], valid=il[valid_idx]); sd
show_doc(ItemList.split_by_valid_func)
show_doc(ItemList.split_from_df)
path = untar_data(URLs.MNIST_SAMPLE)
df = pd.read_csv(path/'labels.csv')
# Create a new column for is_valid
df['is_valid'] = [True]*(df.shape[0]//2) + [False]*(df.shape[0]//2)
# Randomly shuffle dataframe
df = df.reindex(np.random.permutation(df.index))
print(df.shape)
df.head()
data = (ImageList.from_df(df, path)
.split_from_df())
data
jekyll_warn("This method assumes the data has been created from a csv file or a dataframe.")
show_doc(ItemList.get_label_cls)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid'); sd
labels = ['7', '3']
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [7, 3]
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [7.0, 3.0]
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [[7, 3],]
label_cls = sd.train.get_label_cls(labels); label_cls
labels = [['7', '3'],]
label_cls = sd.train.get_label_cls(labels); label_cls
path = untar_data(URLs.MNIST_TINY)
ll = ImageList.from_folder(path).split_by_folder().label_from_folder().train
ll.train.to_csv('tmp.csv')
ll.to_df().head()
show_doc(ItemList.label_empty)
show_doc(ItemList.label_from_df)
jekyll_warn("This method only works with data objects created with either `from_csv` or `from_df` methods.")
show_doc(ItemList.label_const)
show_doc(ItemList.label_from_folder)
jekyll_note("This method looks at the last subfolder in the path to determine the classes.")
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
sd_train = ImageList.from_folder(path_data/'train').split_none()
ll_train = sd_train.label_from_folder(); ll_train
show_doc(ItemList.label_from_func)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid');sd
func=lambda o: (o.parts if isinstance(o, Path) else o.split(os.path.sep))[-2]
ll = sd.label_from_func(func); ll
show_doc(ItemList.label_from_re)
show_doc(CategoryList, title_level=3)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
ll = ImageList.from_folder(path_data).split_by_folder('train', 'valid').label_from_folder()
ll.train.y.items, ll.train.y.classes, ll.train.y[0]
cl = CategoryList(ll.train.y.items, ll.train.y.classes); cl
from fastai.vision import *
items = np.array([0, 1, 2, 1, 0])
cl = CategoryList(items, classes=['3', '7', '9']); cl
items = np.array(['3', '7', '9', '7', '3'])
classes = {'3':3, '7':7, '9':9}
cl = CategoryList(items, classes); cl
show_doc(MultiCategoryList, title_level=3)
show_doc(FloatList, title_level=3)
show_doc(EmptyLabelList, title_level=3)
show_doc(PreProcessor, title_level=3)
show_doc(PreProcessor.process_one)
show_doc(PreProcessor.process)
show_doc(CategoryProcessor, title_level=3)
show_doc(CategoryProcessor.generate_classes)
show_doc(CategoryProcessor.process)
show_doc(MultiCategoryProcessor, title_level=3)
show_doc(MultiCategoryProcessor.generate_classes)
show_doc(LabelLists.transform)
show_doc(LabelLists.add_test)
jekyll_note("Here `items` can be an `ItemList` or a collection.")
show_doc(LabelLists.add_test_folder)
jekyll_warn("In fastai the test set is unlabeled! No labels will be collected even if they are available.")
data_test = (ImageList.from_folder(path)
.split_by_folder(train='train', valid='test')
.label_from_folder()
...)
tfms = []
path = Path('data').resolve()
data = (ImageList.from_folder(path)
.split_by_pct()
.label_from_folder()
.transform(tfms)
.databunch()
.normalize() )
learn = cnn_learner(data, models.resnet50, metrics=accuracy)
learn.fit_one_cycle(5,1e-2)
# now replace the validation dataset entry with the test dataset as a new validation dataset:
# everything is exactly the same, except replacing `split_by_pct` w/ `split_by_folder`
# (or perhaps you were already using the latter, so simply switch to valid='test')
data_test = (ImageList.from_folder(path)
.split_by_folder(train='train', valid='test')
.label_from_folder()
.transform(tfms)
.databunch()
.normalize()
)
learn.validate(data_test.valid_dl)
show_doc(LabelLists.databunch)
show_doc(LabelList, title_level=3)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
ll = ImageList.from_folder(path_data).split_by_folder('train', 'valid').label_from_folder()
ll.train.x, ll.train.y
LabelList(x=ll.train.x, y=ll.train.y)
show_doc(LabelList.export)
show_doc(LabelList.transform_y)
show_doc(LabelList.get_state)
show_doc(LabelList.load_empty)
show_doc(LabelList.load_state)
show_doc(LabelList.process)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid')
sd.train = sd.train.label_from_folder(from_item_lists=True)
sd.valid = sd.valid.label_from_folder(from_item_lists=True)
sd.__class__ = LabelLists
xp,yp = sd.get_processors()
xp,yp
sd.train.process(xp, yp)
show_doc(LabelList.set_item)
show_doc(LabelList.to_df)
show_doc(LabelList.to_csv)
show_doc(LabelList.transform)
show_doc(ItemLists, title_level=3)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il_train = ImageList.from_folder(path_data/'train')
il_valid = ImageList.from_folder(path_data/'valid')
il_test = ImageList.from_folder(path_data/'test')
ils = ItemLists(path=path_data, train=il_train, valid=il_valid); ils
ils.test = il_test; ils
il = ImageList.from_folder(path_data); il
sd = il.split_by_folder(train='train', valid='valid'); sd
ll = sd.label_from_folder(); ll
il_train = ImageList.from_folder(path_data/'train')
sd_train = il_train.split_none(); sd_train
ll_valid_empty = sd_train.label_from_folder(); ll_valid_empty
ld_inner = sd.__getattr__('label_from_folder'); ld_inner()
show_doc(ItemLists.label_from_lists)
show_doc(ItemLists.transform)
show_doc(ItemLists.transform_y)
show_doc(LabelLists, title_level=3)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY); path_data.ls()
il_train = ImageList.from_folder(path_data/'train')
il_valid = ImageList.from_folder(path_data/'valid')
ll_test = LabelLists(path_data, il_train, il_valid);
ll_test.test = il_valid = ImageList.from_folder(path_data/'test')
ll_test
show_doc(LabelLists.get_processors)
from fastai.vision import *
path_data = untar_data(URLs.MNIST_TINY)
sd = ImageList.from_folder(path_data).split_by_folder('train', 'valid')
sd.train = sd.train.label_from_folder(from_item_lists=True)
sd.valid = sd.valid.label_from_folder(from_item_lists=True)
sd.__class__ = LabelLists
xp,yp = sd.get_processors()
xp,yp
show_doc(LabelLists.load_empty)
show_doc(LabelLists.load_state)
show_doc(LabelLists.process)
show_doc(ItemList.process)
show_doc(get_files)
path_data = untar_data(URLs.MNIST_TINY)
path_data.ls()
list_FilePath_noRecurse = get_files(path_data)
list_FilePath_noRecurse
list_FilePath_recurse = get_files(path_data, recurse=True)
list_FilePath_recurse[:3]
list_FilePath_recurse[-2:]
list_FilePath_recurse_csv = get_files(path_data, recurse=True, extensions=['.csv'])
list_FilePath_recurse_csv
list_FilePath_include = get_files(path_data, recurse=True, extensions=['.png','.jpg','.jpeg'],
include=['test'])
list_FilePath_include[:3]
list_FilePath_include[-3:]
show_doc(CategoryList.new)
show_doc(LabelList.new)
show_doc(CategoryList.get)
show_doc(LabelList.predict)
show_doc(ItemList.new)
show_doc(ItemList.process_one)
show_doc(MultiCategoryProcessor.process_one)
show_doc(FloatList.get)
show_doc(CategoryProcessor.process_one)
show_doc(CategoryProcessor.create_classes)
show_doc(MultiCategoryList.get)
show_doc(FloatList.new)
show_doc(FloatList.reconstruct)
show_doc(MultiCategoryList.analyze_pred)
show_doc(MultiCategoryList.reconstruct)
show_doc(CategoryList.reconstruct)
show_doc(CategoryList.analyze_pred)
show_doc(EmptyLabelList.reconstruct)
show_doc(EmptyLabelList.get)
show_doc(LabelList.databunch)
show_doc(ItemList.add)
| 0.596198 | 0.988301 |
<font size="+5">#08. Hyperparameter Tuning with Cross Validation</font>
- Book + Private Lessons [Here ↗](https://sotastica.com/reservar)
- Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)
- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄
# Load the Data
> - The goal of this dataset is
> - To predict whether the **bank's customers** (rows) will `default` next month
> - Based on their **socio-demographic characteristics** (columns)
```
import pandas as pd
pd.set_option("display.max_columns", None)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls'
df = pd.read_excel(io=url, header=1, index_col=0)
df.sample(10)
```
# `DecisionTreeClassifier()` with Default Hyperparameters
```
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
X = df.drop(columns='default payment next month')
y = df['default payment next month']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
```
## Accuracy
> In `train` data
```
model.score(X_train, y_train)
```
> In `test` data
```
model.score(X_test, y_test)
```
## Model Visualization
> - `plot_tree()`
```
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
```
# `DecisionTreeClassifier()` with Custom Hyperparameters
```
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/7VeUPuFGJHk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```
> - The `model` has this hyperparameters ↓
```
model = DecisionTreeClassifier()
model.get_params()
```
## 1st Configuration
```
model = DecisionTreeClassifier(max_depth=4)
model.fit(X_train,y_train)
```
## Accuracy
> In `train` data
```
model.score(X_train, y_train)
```
> In `test` data
```
model.score(X_test, y_test)
```
## Model Visualization
> - `plot_tree()`
```
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True, fontsize=7);
```
## 2nd Configuration
```
model = DecisionTreeClassifier(criterion='entropy')
model.fit(X_train,y_train)
```
## Accuracy
> In `train` data
```
model.score(X_train, y_train)
```
> In `test` data
```
model.score(X_test, y_test)
```
## Model Visualization
> - `plot_tree()`
```
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
```
## 3rd Configuration
```
model = DecisionTreeClassifier(min_samples_leaf=50)
model.fit(X_train,y_train)
```
## Accuracy
> In `train` data
```
model.score(X_train, y_train)
```
> In `test` data
```
model.score(X_test, y_test)
```
## Model Visualization
> - `plot_tree()`
```
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
```
## 4th Configuration
```
model = DecisionTreeClassifier(min_samples_leaf=100)
model.fit(X_train,y_train)
```
## Accuracy
> In `train` data
```
model.score(X_train, y_train)
```
> In `test` data
```
model.score(X_test, y_test)
```
## Model Visualization
> - `plot_tree()`
```
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
```
## 5th Configuration
```
model = DecisionTreeClassifier(min_samples_leaf=200)
model.fit(X_train,y_train)
```
## Accuracy
> In `train` data
```
model.score(X_train, y_train)
```
> In `test` data
```
model.score(X_test, y_test)
```
## Model Visualization
> - `plot_tree()`
```
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
```
# `GridSearchCV()` to find Best Hyperparameters
> - How many scores for each fold?
<img src="src/grid_search_cross_validation.png" style="margin-top: 100px"/>
```
from sklearn.model_selection import GridSearchCV
# note: GridSearchCV needs at least an estimator and a param_grid, so calling GridSearchCV() with no arguments raises a TypeError
dt = DecisionTreeClassifier()
cv_dt = GridSearchCV(estimator=dt, param_grid={'min_samples_leaf': [50, 100, 200],
'criterion': ['gini', 'entropy'],
'max_depth': [5,8,11, 15]}, verbose=2)
cv_dt.fit(X_train, y_train)
cv_dt.best_params_
```
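Once the search has finished, it can help to inspect the best cross-validated score and the full table of results (a small sketch, assuming `cv_dt` has been fitted as above):
```
cv_dt.best_score_  # mean cross-validated accuracy of the best combination
results = pd.DataFrame(cv_dt.cv_results_)  # one row per hyperparameter combination
results[['params', 'mean_test_score', 'std_test_score', 'rank_test_score']].sort_values('rank_test_score').head()
```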
# Other Models
## Support Vector Machines `SVC()`
```
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/efR1C6CvhmE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
from sklearn.svm import SVC
sv = SVC()
cv_sv = GridSearchCV(estimator=sv, param_grid={}, verbose=2)
cv_sv.fit(X_train, y_train)
```
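Note that the empty `param_grid` above just fits a single default `SVC()` per fold. A sketch of a small, real grid is shown below; the particular values are only a starting guess, and `SVC` can be slow on a dataset of this size:
```
param_grid_sv = {'C': [0.1, 1, 10],
                 'kernel': ['rbf', 'linear'],
                 'gamma': ['scale', 'auto']}
cv_sv = GridSearchCV(estimator=SVC(), param_grid=param_grid_sv, verbose=2)
# cv_sv.fit(X_train, y_train)  # uncomment with care: this can take a long time on ~30k rows
```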
## `KNeighborsClassifier()`
```
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/HVXime0nQeI" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```
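A possible grid search for `KNeighborsClassifier()` following the same pattern (the candidate values below are only an illustrative choice):
```
from sklearn.neighbors import KNeighborsClassifier

kn = KNeighborsClassifier()
cv_kn = GridSearchCV(estimator=kn,
                     param_grid={'n_neighbors': [3, 5, 11, 21],
                                 'weights': ['uniform', 'distance']},
                     verbose=2)
cv_kn.fit(X_train, y_train)
cv_kn.best_params_
```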
# Best Model with Best Hyperparameters
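One way to close the loop, assuming the grid searches above have been fitted: compare their best cross-validated scores, then evaluate the winning `best_estimator_` on the held-out test data.
```
# compare the searches fitted above
print('DecisionTree:', cv_dt.best_score_, cv_dt.best_params_)
print('SVC:', cv_sv.best_score_, cv_sv.best_params_)

# e.g. if the decision tree search comes out on top:
best_model = cv_dt.best_estimator_
best_model.score(X_test, y_test)
```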
|
github_jupyter
|
import pandas as pd
pd.set_option("display.max_columns", None)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls'
df = pd.read_excel(io=url, header=1, index_col=0)
df.sample(10)
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
X = df.drop(columns='default payment next month')
y = df['default payment next month']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
model.score(X_train, y_train)
model.score(X_test, y_test)
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/7VeUPuFGJHk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
model = DecisionTreeClassifier()
model.get_params()
model = DecisionTreeClassifier(max_depth=4)
model.fit(X_train,y_train)
model.score(X_train, y_train)
model.score(X_test, y_test)
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True, fontsize=7);
model = DecisionTreeClassifier(criterion='entropy')
model.fit(X_train,y_train)
model.score(X_train, y_train)
model.score(X_test, y_test)
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
model = DecisionTreeClassifier(min_samples_leaf=50)
model.fit(X_train,y_train)
model.score(X_train, y_train)
model.score(X_test, y_test)
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
model = DecisionTreeClassifier(min_samples_leaf=100)
model.fit(X_train,y_train)
model.score(X_train, y_train)
model.score(X_test, y_test)
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
model = DecisionTreeClassifier(min_samples_leaf=200)
model.fit(X_train,y_train)
model.score(X_train, y_train)
model.score(X_test, y_test)
from sklearn.tree import plot_tree
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
from sklearn.model_selection import GridSearchCV
# note: GridSearchCV needs at least an estimator and a param_grid, so calling GridSearchCV() with no arguments raises a TypeError
dt = DecisionTreeClassifier()
cv_dt = GridSearchCV(estimator=dt, param_grid={'min_samples_leaf': [50, 100, 200],
'criterion': ['gini', 'entropy'],
'max_depth': [5,8,11, 15]}, verbose=2)
cv_dt.fit(X_train, y_train)
cv_dt.best_params_
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/efR1C6CvhmE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
from sklearn.svm import SVC
sv = SVC()
cv_sv = GridSearchCV(estimator=sv, param_grid={}, verbose=2)
cv_sv.fit(X_train, y_train)
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/HVXime0nQeI" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
| 0.68595 | 0.965381 |
# February 8th Standup
## Custom denominator for LEP
- Denominator columns in output
- No more "not_lep" or "not_fb" (foreign born), just affirmative
- Custom denominator of just people 5 and over for LEP
I chose to put these numbers in a Jupyter notebook to walk through, as for some reason it's brain melting. I will open the pull request after standup.
```
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('../utils')
import wd_management
wd_management.set_wd_root()
from aggregate.PUMS.count_PUMS_demographics import PUMSCountDemographics
aggregator = PUMSCountDemographics(limited_PUMA=True)
df = aggregator.aggregated
```
Start with total_pop. It's easiest
```
print(df['total_pop-count'])
print()
print(df['total_pop-fraction'])
print()
print(df['total_pop-fraction-denom'])
```
Ok that all looks good
## Foreign Born
This indicator is next easiest as there is only one category and the denom should be total pop
```
print(df['fb-count'])
print()
print(df['fb-fraction'])
assert (df['fb-fraction-denom'] == df['total_pop-count']).all()
assert (df['fb-count']/df['fb-fraction-denom'] == df['fb-fraction']).all()
```
Ok great. What about foreign born by race? What should that denom be? Let's take foreign born asian as an example
```
print(df['fb-anh-count'])
print()
print(df['fb-anh-fraction'])
print()
print(df['fb-anh-fraction-denom'])
```
The denominator here is the total number of Asian non-Hispanic people in PUMA 4001 (Greenpoint): fb-anh count / total anh count = 7615/10979 ≈ 69%, i.e. 69% of the Asian non-Hispanic population in Greenpoint is foreign born.
```
assert (df['fb-anh-fraction-denom'] == df['total_pop-anh-count']).all()
```
## Limited english proficiency
This is a little more complex as our denominator is smaller than all people
```
print(df['lep-count'])
print()
print(df['lep-fraction'])
print()
print(df['lep-fraction-denom'])
assert (df['lep-count']/df['lep-fraction-denom'] == df['lep-fraction']).all()
```
How do the denominators for LEP and total pop compare?
```
df['lep-fraction-denom']/df['total_pop-count']
```
91-94% of people are over age 5; that passes the smell test.
Similar question as above, what is denominator for LEP black non-hispanic?
```
print(df['lep-bnh-count'])
print()
print(df['lep-bnh-fraction-denom'])
assert (df['lep-bnh-count']/df['lep-bnh-fraction-denom'] == df['lep-bnh-fraction']).all()
```
That looks good to me
## Age buckets
Finally, look at the age buckets; these should all follow the same pattern, but it doesn't hurt to take a look. The denominator is supposed to be all people.
```
print(df['P16t64-count'])
print()
print(df['P16t64-fraction'])
print()
print(df['P16t64-fraction-denom'])
assert (df['P16t64-count']/df['P16t64-fraction-denom'] == df['P16t64-fraction']).all()
```
|
github_jupyter
|
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('../utils')
import wd_management
wd_management.set_wd_root()
from aggregate.PUMS.count_PUMS_demographics import PUMSCountDemographics
aggregator = PUMSCountDemographics(limited_PUMA=True)
df = aggregator.aggregated
print(df['total_pop-count'])
print()
print(df['total_pop-fraction'])
print()
print(df['total_pop-fraction-denom'])
print(df['fb-count'])
print()
print(df['fb-fraction'])
assert (df['fb-fraction-denom'] == df['total_pop-count']).all()
assert (df['fb-count']/df['fb-fraction-denom'] == df['fb-fraction']).all()
print(df['fb-anh-count'])
print()
print(df['fb-anh-fraction'])
print()
print(df['fb-anh-fraction-denom'])
assert (df['fb-anh-fraction-denom'] == df['total_pop-anh-count']).all()
print(df['lep-count'])
print()
print(df['lep-fraction'])
print()
print(df['lep-fraction-denom'])
assert (df['lep-count']/df['lep-fraction-denom'] == df['lep-fraction']).all()
df['lep-fraction-denom']/df['total_pop-count']
print(df['lep-bnh-count'])
print()
print(df['lep-bnh-fraction-denom'])
assert (df['lep-bnh-count']/df['lep-bnh-fraction-denom'] == df['lep-bnh-fraction']).all()
print(df['P16t64-count'])
print()
print(df['P16t64-fraction'])
print()
print(df['P16t64-fraction-denom'])
assert (df['P16t64-count']/df['P16t64-fraction-denom'] == df['P16t64-fraction']).all()
| 0.275519 | 0.628037 |
```
GITHUB_USERNAME = "$GITHUB_USERNAME$"
GITHUB_REF = "$GITHUB_REF$"
NOTEBOOK_TYPE = "$NOTEBOOK_TYPE$"
PYTHON_VERSION = "$PYTHON_VERSION$"
IPYTHON_VERSION = "$IPYTHON_VERSION$"
from pathlib import Path
import requests
if NOTEBOOK_TYPE == 'colab':
# utils module doesn't exist on colab VM, so get current version from GitHub
utils_module = Path('utils.py').resolve()
response = requests.get(f'https://raw.githubusercontent.com/{GITHUB_USERNAME}/davos/{GITHUB_REF}/tests/utils.py')
utils_module.write_text(response.text)
# also need to install davos locally
from utils import install_davos
install_davos(source='github', ref=GITHUB_REF, fork=GITHUB_USERNAME)
from contextlib import redirect_stdout
from io import StringIO
from subprocess import CalledProcessError
from textwrap import dedent
import davos
import IPython
from utils import mark, raises, run_tests
```
# tests for `davos.implementations.ipython_common`
```
def test_ipython_common_imports():
"""
check that functions that should've been imported from the
ipython_common module came from the right place
"""
ipy_common_funcs = (
'_check_conda_avail_helper',
'_run_shell_command_helper',
'_set_custom_showsyntaxerror'
)
for func_name in ipy_common_funcs:
func_obj = getattr(davos.implementations, func_name)
func_module = getattr(func_obj, '__module__')
assert func_module == 'davos.implementations.ipython_common', (
f"davos.implementations.{func_name} is {func_module}.{func_name}. "
f"Expected davos.implementations.ipython_common.{func_name}"
)
@mark.jupyter
def test_check_conda_avail_helper():
"""
test helper function for getting conda-related config fields
"""
expected_env_path = "/usr/share/miniconda/envs/kernel-env"
# only part of output that matters is line with environment path
expected_first_line = f"# packages in environment at {expected_env_path}:"
result_output = davos.implementations.ipython_common._check_conda_avail_helper()
result_first_line = result_output.splitlines()[0]
result_env_path = result_first_line.split()[-1].rstrip(':')
assert result_env_path == expected_env_path, (
f"Result:{result_env_path}\nExpected:{expected_env_path}"
)
def test_run_shell_command_helper():
"""test helper function for davos.core.core.run_shell_command"""
# this command should pass...
with redirect_stdout(StringIO()) as tmp_stdout:
davos.implementations.ipython_common._run_shell_command_helper('echo "test"')
stdout = tmp_stdout.getvalue().strip()
assert stdout == 'test', stdout
# ...this command should fail
with raises(CalledProcessError), redirect_stdout(StringIO()):
davos.implementations.ipython_common._run_shell_command_helper('"tset " ohce')
def test_set_custom_showsyntaxerror():
"""
check that the IPython shell's .showsyntaxerror() method was
replaced with the custom davos implementation, and that the original
is stored in the davos config
"""
orig_func = davos.implementations.ipython_common._showsyntaxerror_davos
bound_method = get_ipython().showsyntaxerror
unbound_func = bound_method.__func__
assert unbound_func is orig_func, (
f"{unbound_func.__module__}.{unbound_func.__name__}"
)
orig_method = davos.config._ipy_showsyntaxerror_orig
assert orig_method is not None
orig_qualname = f"{orig_method.__module__}.{orig_method.__qualname__}"
expected_orig_qualname = "IPython.core.interactiveshell.InteractiveShell.showsyntaxerror"
assert orig_qualname == expected_orig_qualname, orig_qualname
run_tests()
```
```
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
cv_dir = 'cv_new'
eggholder_dir = 'reg'
files_cifar10 = ['SGD_Moment_torch.csv', 'SGD_torch.csv', 'AdaHess_torch.csv', 'Adam_torch.csv', 'AdamW_torch.csv']
files_eggholder = ['SGD_Moment_Reg_torch.csv', 'SGD_Reg_torch.csv', 'Adam_Reg_torch.csv', 'AdamW_Reg_torch.csv', 'AdamHess_Reg_torch.csv']
print(len(files_cifar10))
print(len(files_eggholder))
```
### Classification Task
```
print("======== Classification Experiment on Cifar-10 dataset ========")
for filename in os.listdir(cv_dir):
if filename.endswith(".csv"):
df = pd.read_csv(os.path.join(cv_dir, filename))
print(filename)
opt = filename.split('.')[0].rsplit('_',3)[0]
fig1, (ax1, ax2) = plt.subplots(1,2, figsize=(15, 4))
ax1.plot(df['epoch'], df['val_loss'], label=opt + " validation")
ax1.plot(df['epoch'], df['loss'], label=opt + " training")
# plt.plot(df2['epoch'], df2['val_loss'], label="SGD_Reg")
ax1.set(xlabel="#Epoch", ylabel="Loss")
ax1.legend(loc="best")
ax1.title.set_text("Training and Validation loss vs. #epochs\n with {} optimizer".format(opt))
ax2.plot(df['epoch'], df['val_acc'], label=opt + " validation")
ax2.plot(df['epoch'], df['accuracy'], label=opt + " training")
# plt.plot(df2['epoch'], df2['val_loss'], label="SGD_Reg")
ax2.set(xlabel="#Epoch", ylabel="Accuracy")
ax2.legend(loc="best")
ax2.title.set_text("Training and Validation accuracy vs. #epochs\n with {} optimizer".format(opt))
plt.show()
print("======== Classification Experiment on Cifar-10 dataset ========")
plt.figure(figsize=(10,6))
for filename in os.listdir(cv_dir):
if filename.endswith(".csv"):
df = pd.read_csv(os.path.join(cv_dir, filename))
print(filename)
print("max accuracy", df['val_acc'].max())
print("min loss", df['val_loss'].min())
opt = filename.split('.')[0].rsplit('_', 3)[0]
plt.plot(df['epoch'], df['val_loss'], label=opt)
plt.xlabel("#Epoch")
plt.ylabel("Loss")
plt.legend(loc="best")
plt.title("Validation loss vs. #epochs with various optimizers on cifar-10 classification".format(opt))
plt.show()
plt.figure(figsize=(10,6))
for filename in os.listdir(cv_dir):
if filename.endswith(".csv"):
df = pd.read_csv(os.path.join(cv_dir, filename))
# print(filename)
opt = filename.split('.')[0].rsplit('_', 3)[0]
plt.plot(df['epoch'], df['val_acc'], label=opt)
plt.xlabel("#Epoch")
plt.ylabel("Accuracy")
plt.legend(loc="best")
plt.title("Validation accuracy vs. #epochs with various optimizers on cifar-10 classification".format(opt))
plt.show()
print("======== cv_new ========")
step_times = {}
for filename in os.listdir(cv_dir):
if filename.endswith(".csv"):
df = pd.read_csv(os.path.join(cv_dir, filename))
opt = filename.split('.')[0].rsplit('_', 3)[0]
t = np.average(df['opt_time'])
step_times[opt] = t
opts = list(step_times.keys())
step_time_values = list(step_times.values())
step_time_series = pd.Series(step_time_values)
# Plot the figure.
plt.figure(figsize=(7, 5))
plt.title("Average Step time of training with each optimizer")
ax = step_time_series.plot(kind='bar')
# ax.set_title('')
ax.set_xlabel('Optimizers')
ax.set_ylabel('Avg Step time (second)')
ax.set_xticklabels(opts, rotation='horizontal')
rects = ax.patches
# Make some labels.
labels = ["%2.4f" % step_time_values[i] for i in range(len(step_time_values))]
print(labels)
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height, label,
ha='center', va='bottom')
```
### Regression Task
```
print("======== Regression Experiment on Eggholder function ========")
for filename in os.listdir(eggholder_dir):
if filename.endswith(".csv"):
df = pd.read_csv(os.path.join(eggholder_dir, filename))
print(filename)
opt = filename.split('.')[0].rsplit('_', 2)[0]
plt.figure()
plt.plot(df['epoch'], df['val_loss'].apply(lambda x: np.sqrt(x)), label=opt + " validation")
plt.plot(df['epoch'], df['loss'].apply(lambda x: np.sqrt(x)), label=opt + " training")
# plt.plot(df2['epoch'], df2['val_loss'], label="SGD_Reg")
plt.xlabel("#Epoch")
plt.ylabel("Loss (RMSE)")
plt.legend(loc="best")
plt.title("Training and Validation losses vs. #epochs with {} optimizer".format(opt))
plt.show()
print("======== Regression Experiment on Eggholder Function ========")
plt.figure(figsize=(10,6))
for filename in os.listdir(eggholder_dir):
if filename.endswith(".csv"):
df = pd.read_csv(os.path.join(eggholder_dir, filename))
print(filename)
print(np.sqrt(df['val_loss'].min()))
opt = filename.split('.')[0].rsplit('_', 2)[0]
plt.plot(df['epoch'], df['val_loss'].apply(lambda x: np.sqrt(x)), label=opt)
plt.xlabel("#Epoch")
plt.ylabel("Loss (RMSE)")
plt.legend(loc="best")
plt.title("Validation loss vs. #epochs with various optimizers on regression task".format(opt))
plt.show()
# no accuracy
print("======== reg ========")
step_times = {}
for filename in os.listdir(eggholder_dir):
if filename.endswith(".csv"):
print(filename)
df = pd.read_csv(os.path.join(eggholder_dir, filename))
opt = filename.split('.')[0].rsplit('_', 2)[0]
t = np.average(df['opt_time'])
step_times[opt] = t
opts = list(step_times.keys())
step_time_values = list(step_times.values())
step_time_series = pd.Series(step_time_values)
# Plot the figure.
plt.figure(figsize=(8, 6))
plt.title("Average Step time of each optimizer")
ax = step_time_series.plot(kind='bar')
# ax.set_title('')
ax.set_xlabel('Optimizers')
ax.set_ylabel('Avg Step time (second)')
ax.set_xticklabels(opts, rotation='horizontal')
rects = ax.patches
# Make some labels.
labels = ["%2.4e" % step_time_values[i] for i in range(len(step_time_values))]
print(labels)
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height, label,
ha='center', va='bottom')
```
# Optimistic Provide - Status Quo Analysis
## Setup
### Import Dependencies
```
import sqlalchemy as sa
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
plt.rcParams['figure.figsize'] = [12, 7]
```
### Establish Database Connection
```
conn = sa.create_engine("postgresql://optprov:password@localhost:5432/optprov")
```
## Analysis
### Define Helper functions
```
def cdf(series: pd.Series) -> pd.DataFrame:
""" calculates the cumulative distribution function of the given series"""
return pd.DataFrame.from_dict({
series.name: np.append(series.sort_values(), series.max()),
"cdf": np.linspace(0, 1, len(series) + 1)
})
```
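As a quick illustration of what `cdf` returns (not part of the analysis itself), here it is applied to a tiny toy series; the expected output is shown as comments:
```
import pandas as pd

toy = pd.Series([3.0, 1.0, 2.0], name="toy")
cdf(toy)
#    toy       cdf
# 0  1.0  0.000000
# 1  2.0  0.333333
# 2  3.0  0.666667
# 3  3.0  1.000000   <- max value repeated so a step plot closes at 1.0
```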
### Numbers
```
query = """
SELECT * FROM provides WHERE measurement_id = 5;
"""
df = pd.read_sql_query(query, con=conn)
len(df)
query = """
SELECT * FROM measurements WHERE id = 5;
"""
pd.read_sql_query(query, con=conn)
```
### Durations
#### Overall Provide Operation
```
column_name = "Duration in s"
query = f"""
SELECT
EXTRACT('epoch' FROM ended_at - started_at) "{column_name}"
FROM provides WHERE measurement_id = 5;
"""
df = pd.read_sql_query(query, con=conn)
cdf_df = cdf(df[column_name])
print(df[column_name].quantile(0.5))
fig, ax = plt.subplots(figsize=(12, 7))
ax.step(cdf_df[column_name], cdf_df["cdf"])
ax.set_xlabel(column_name)
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}, p50 {df[column_name].quantile(0.5):.2f}s, p90 {df[column_name].quantile(0.9):.2f}s, p99 {df[column_name].quantile(0.99):.2f}s")
ax.set_xlim(-10,90)
```
### Connections per Provide
```
column_name = "Number of Connections"
query = f"""
SELECT count(pxc.connection_id) "{column_name}"
FROM provides p
INNER JOIN provides_x_connections pxc on p.id = pxc.provide_id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
len(df)
cdf_df = cdf(df[column_name])
fig, ax = plt.subplots()
ax.step(cdf_df[column_name], cdf_df["cdf"])
ax.set_xlabel(column_name + " per Provide")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
```
### Dials per Provide
```
query = """
SELECT
sum(1) dials_count,
sum(1) FILTER ( WHERE d.error IS NULL OR d.error = 'context canceled' ) successful_dials
FROM provides p
INNER JOIN provides_x_dials pxd on p.id = pxd.provide_id
INNER JOIN dials d on pxd.dial_id = d.id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
cdf_df = cdf(df["dials_count"])
ax.step(cdf_df["dials_count"], cdf_df["cdf"], label=f"All Dials")
cdf_df = cdf(df["successful_dials"])
ax.step(cdf_df["successful_dials"], cdf_df["cdf"], label=f"Successful Dials")
ax.set_xlabel("Number of Dials per Provide")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
ax.legend()
```
### `FIND_NODES` RPCs per Provide
```
query = """
SELECT
sum(1) find_nodes_rpcs_count,
sum(1) FILTER ( WHERE apr.error IS NULL ) successful_find_nodes_rpcs_count
FROM provides p
INNER JOIN provides_x_find_nodes_rpcs pxapr on p.id = pxapr.provide_id
INNER JOIN find_nodes_rpcs apr on pxapr.find_nodes_rpc_id = apr.id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
cdf_df = cdf(df["find_nodes_rpcs_count"])
ax.step(cdf_df["find_nodes_rpcs_count"], cdf_df["cdf"], label=f"All FIND_NODES RPCs")
cdf_df = cdf(df["successful_find_nodes_rpcs_count"])
ax.step(cdf_df["successful_find_nodes_rpcs_count"], cdf_df["cdf"], label=f"Successful FIND_NODES RPCs")
ax.set_xlabel("Number of FIND_NODES RPCs per Provide")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
```
### `ADD_PROVIDER` RPCs per Provide
```
query = """
SELECT
sum(1) add_provider_rpcs_count,
sum(1) FILTER ( WHERE apr.error IS NULL ) successful_add_provider_rpcs_count
FROM provides p
INNER JOIN provides_x_add_provider_rpcs pxapr on p.id = pxapr.provide_id
INNER JOIN add_provider_rpcs apr on pxapr.add_provider_rpc_id = apr.id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
cdf_df = cdf(df["add_provider_rpcs_count"])
ax.step(cdf_df["add_provider_rpcs_count"], cdf_df["cdf"], label=f"All ADD_PROVIDER RPCs")
cdf_df = cdf(df["successful_add_provider_rpcs_count"])
ax.step(cdf_df["successful_add_provider_rpcs_count"], cdf_df["cdf"], label=f"Successful ADD_PROVIDER RPCs")
ax.set_xlabel("Number of ADD_PROVIDER RPCs per Provide")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
ax.set_xticks(np.arange(0, 21, step=1))
plt.legend()
plt.plot()
```
### Distances
#### Selected Provider Record Storing Peers
```
query = """
SELECT encode(apr.distance, 'hex') distance FROM provides p
INNER JOIN provides_x_add_provider_rpcs pxapr on p.id = pxapr.provide_id
INNER JOIN add_provider_rpcs apr on pxapr.add_provider_rpc_id = apr.id
WHERE p.measurement_id = 5
"""
df = pd.read_sql_query(query, con=conn)
df["distance_normed"] = df.apply(lambda row: int(row["distance"], base=16) / (2 ** 256 - 1), axis=1)
df["distance_normed_pct"] = df.apply(lambda row: 100 * row["distance_normed"], axis=1)
fig, ax = plt.subplots()
cdf_df = cdf(df["distance_normed_pct"])
ax.step(cdf_df["distance_normed_pct"], cdf_df["cdf"], label=f"All Distances")
ax.set_xlabel("Normed XOR Distance in %")
ax.set_ylabel("CDF")
ax.set_title(f"Number of ADD_PROVIDER RPCs {len(df)}")
ax.legend()
plt.hist(df["distance_normed_pct"], bins=30)
plt.plot()
```
### Distribution of Average Provider-Record-Storing Peer Distance per Provide
```
query = """
SELECT array_agg(encode(apr.distance, 'hex')) distances
FROM provides p
INNER JOIN provides_x_add_provider_rpcs pxapr on p.id = pxapr.provide_id
INNER JOIN add_provider_rpcs apr on pxapr.add_provider_rpc_id = apr.id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
df["distance_normed_pct_avg"] = df.apply(
lambda row: np.average(list(map(lambda distance: 100 * int(distance, base=16) / (2 ** 256 - 1), row["distances"]))),
axis=1)
df["distance_normed_pct_median"] = df.apply(
lambda row: np.median(list(map(lambda distance: 100 * int(distance, base=16) / (2 ** 256 - 1), row["distances"]))),
axis=1)
fig, ax = plt.subplots()
cdf_df = cdf(df["distance_normed_pct_avg"])
ax.step(cdf_df["distance_normed_pct_avg"], cdf_df["cdf"], label=f"Average Distance per Provide")
# cdf_df = cdf(df["distance_normed_pct_median"])
# ax.step(cdf_df["distance_normed_pct_median"], cdf_df["cdf"], label=f"Median Distance per Provide")
ax.set_xlabel("Normed XOR Distance in %")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
ax.legend()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[20, 7])
ax1.hist(df["distance_normed_pct_avg"], bins=30, label=f"Average Distance per Provide")
ax1.set_xlabel("Normed XOR Distance in %")
ax1.set_ylabel("Count")
ax1.set_title(f"Number of Provide Operations {len(df)}")
ax1.legend()
ax2.hist(df["distance_normed_pct_median"], bins=30, label=f"Median Distance per Provide")
ax2.set_xlabel("Normed XOR Distance in %")
ax2.set_ylabel("Count")
ax2.set_title(f"Number of Provide Operations {len(df)}")
ax2.legend()
```
## Follow-Up Analysis
```
query = """
SELECT p.id,
EXTRACT('epoch' FROM max(fnr.ended_at) FILTER ( WHERE fnr.query_id != '00000000-0000-0000-0000-000000000000' ) -
min(fnr.started_at)
FILTER ( WHERE fnr.query_id != '00000000-0000-0000-0000-000000000000' )) "dht_walk",
coalesce(EXTRACT('epoch' FROM
max(fnr.ended_at) FILTER ( WHERE fnr.query_id = '00000000-0000-0000-0000-000000000000' ) -
min(fnr.started_at) FILTER ( WHERE fnr.query_id = '00000000-0000-0000-0000-000000000000' )),
0) "follow_up"
FROM provides p
INNER JOIN provides_x_find_nodes_rpcs pxfnr on p.id = pxfnr.provide_id
INNER JOIN find_nodes_rpcs fnr on fnr.id = pxfnr.find_nodes_rpc_id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
cdf_df = cdf(df["dht_walk"])
ax.step(cdf_df["dht_walk"], cdf_df["cdf"], label=f"DHT Walk")
cdf_df = cdf(df["follow_up"])
ax.step(cdf_df["follow_up"], cdf_df["cdf"], label=f"Follow Up")
ax.set_xlabel("Duration in s")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
ax.legend()
df['ratio'] = df.apply(lambda row: row["follow_up"] / row["dht_walk"], axis=1)
fig, ax = plt.subplots()
cdf_df = cdf(df["ratio"])
ax.step(cdf_df["ratio"], cdf_df["cdf"], label=f"Follow-Up to DHT Walk Ratio")
ax.set_xlabel("Ratio Follow-Up Duration/DHT Walk Duration")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
ax.legend()
```
## Routing Table Analysis
```
query = """
SELECT EXTRACT('epoch' FROM p.ended_at - p.started_at) "provide_duration",
PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY EXTRACT('epoch' FROM rts.created_at - rte.added_at)) "median_age_in_s"
FROM provides p
INNER JOIN routing_table_snapshots rts on rts.id = p.initial_routing_table_id
INNER JOIN routing_table_entries rte on rts.id = rte.routing_table_snapshot_id
WHERE p.measurement_id = 5
GROUP BY p.id
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
ax.scatter(df["median_age_in_s"], df["provide_duration"])
m, c = np.polyfit(df["median_age_in_s"], df["provide_duration"], 1)
ax.plot(df["median_age_in_s"], m * df["median_age_in_s"] + c, color="orange", label="linear fit")
ax.set_xlabel("Median Routing Table Age in s")
ax.set_ylabel("Provide Duration in s")
ax.set_title(f"Number of Provide Operations {len(df)}")
ax.legend()
```
## Discovery Analysis
```
query = """
WITH providers AS (
SELECT p.id provide_id, p.started_at, apr.remote_id
FROM provides p
INNER JOIN provides_x_add_provider_rpcs pxapr on p.id = pxapr.provide_id
INNER JOIN add_provider_rpcs apr on pxapr.add_provider_rpc_id = apr.id
WHERE p.measurement_id = 5
),
referrers AS (
SELECT p.*, ps.referrer_id
FROM providers p
INNER JOIN provides_x_peer_states pxps on p.provide_id = pxps.provide_id
INNER JOIN peer_states ps on pxps.peer_state_id = ps.id
WHERE ps.peer_id = p.remote_id
AND pxps.provide_id = p.provide_id
),
find_nodes AS (
SELECT r.*,
coalesce(EXTRACT('epoch' FROM (SELECT min(fnr.ended_at)
FROM find_nodes_rpcs fnr
INNER JOIN provides_x_find_nodes_rpcs pxfnr on fnr.id = pxfnr.find_nodes_rpc_id
WHERE pxfnr.provide_id = r.provide_id
AND fnr.remote_id = r.referrer_id) - r.started_at), 0) delay_in_s
FROM referrers r
)
SELECT fn.provide_id, fn.remote_id, fn.referrer_id, fn.delay_in_s
FROM find_nodes fn
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
cdf_df = cdf(df["delay_in_s"])
ax.step(cdf_df["delay_in_s"], cdf_df["cdf"])
ax.set_xlabel("Time to Discover eventually selected Peers in s")
ax.set_ylabel("CDF")
ax.set_title(f"Number of Provide Operations {len(df)}")
```
## Final Routing Table Overlap
```
query = """
WITH cte AS (
SELECT p.id provide_id, ps.peer_id, count(DISTINCT ps.query_id) query_count
FROM provides p
INNER JOIN provides_x_peer_states pxps on p.id = pxps.provide_id
INNER JOIN peer_states ps on ps.id = pxps.peer_state_id
WHERE p.measurement_id = 5
GROUP BY p.id, ps.peer_id
)
SELECT cte.provide_id, count(cte.query_count)
FROM cte
WHERE cte.query_count > 1
GROUP BY cte.provide_id
"""
df = pd.read_sql_query(query, con=conn)
fig, ax = plt.subplots()
cdf_df = cdf(df["count"])
ax.step(cdf_df["count"], cdf_df["cdf"])
ax.set_ylabel("CDF")
ax.set_title(f"Query Overlap after reaching threshold of 20")
```
# Song "Spleeter" Demo
<img src="https://raw.githubusercontent.com/deezer/spleeter/master/images/spleeter_logo.png" width="250px"/>
### A click-through web page by [fat-tire](https://twitter.com/fat__tire)
----
*Note: All credit for this goes to [Deezer](https://developers.deezer.com/) who created and trained the network and [provided it to the public](https://github.com/deezer/spleeter). I'm just putting it in a slightly easier click-through format within a Google Colaboratory web page. [Deezer's github page](https://github.com/deezer/spleeter) also offers a [similar demo](https://colab.research.google.com/github/deezer/spleeter/blob/master/spleeter.ipynb), but this one makes it very easy to upload your own songs + download the results. It also uses the GPU version for faster processing.*
**Spleeter** is a deep learning network that can separate (split, or "spleet") a singer(s) vocals from the music. You give it a "song.mp3", and it gives you two mp3s out: "vocals.mp3" and "accompaniment.mp3". Spleeter supports various dimensions of separation, but the demo here just spleets into two parts (or "stems"). Deezer provides more details [here](https://github.com/deezer/spleeter/blob/master/README.md), including instructions for the other audio separation stems: vocals, bass, drums, piano, & other.
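For reference, the number of stems is selected via the `-p` option of the `spleeter separate` command that this notebook runs later (the demo below uses the 2-stem model, and `2stems`/`4stems`/`5stems` are Deezer's pretrained configuration names). The commands here are illustrative only; the actual input path and the full path to the `spleeter` binary used in this notebook appear in the steps below.
```
# 2 stems: vocals / accompaniment (what this demo uses)
! spleeter separate -i song.mp3 -p spleeter:2stems -o output
# 4 stems: vocals / drums / bass / other
! spleeter separate -i song.mp3 -p spleeter:4stems -o output
# 5 stems: vocals / drums / bass / piano / other
! spleeter separate -i song.mp3 -p spleeter:5stems -o output
```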
Potential uses for this might be to create tracks for:
* Parodies
* Karaoke
* Remixes
* Education (like, what ARE the lyrics in [Louie Louie](https://www.youtube.com/watch?v=xKt75jUuKJY)? Is there a [hidden F-bomb in Hey Jude](https://www.youtube.com/watch?v=qyo1ScwcDAc)?)
* Rehearsal
Instructions: Have an mp3 standing by with your favorite song (one you are authorized/licensed to use). Now click through the "play" buttons below. I'll walk you through the steps.
_Disclaimer_: Use entirely at your own risk. Do not violate copyrights. Make sure you are authorized/licensed to use your audio with Spleeter. See Deezer's original license [here](https://github.com/deezer/spleeter/blob/master/LICENSE). Additions/changes by fat-tire in this Colaboratory document are also offered under the [MIT license](https://github.com/fat-tire/SongSpleeterColab/blob/master/LICENSE). Feel free to fork or save a copy of this Colab in Drive or GitHub (it's even an option under File in the menu above) or print out and press with flowers into your journal.
### STEP ONE:
This first section will download all the prerequisites and set everything up. Press the play button below and then wait about 10-15 minutes.
⬇ Press the play button below
```
# we need pydub for later
! pip3 install pydub
# and this
from google.colab import files
import IPython.display as ipd
import sys
# get Anaconda...
! wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh
! chmod +x Anaconda3-2019.10-Linux-x86_64.sh
! bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local
sys.path.append('/usr/local/lib/python3.6/site-packages/')
# get spleeter dependencies
! sudo git clone https://github.com/deezer/spleeter
! wget https://github.com/deezer/spleeter/blob/master/conda/spleeter-gpu.yaml
# set stuff up. This takes a while.
! conda env create -f spleeter/conda/spleeter-gpu.yaml
# this goes fast.
! source activate spleeter-gpu
! export PATH=$"/usr/local/envs/spleeter-gpu/bin:$PATH"
```
Now that we're done downloading and setting everything up, it's time for...
### STEP TWO
Upload your "song.mp3"!
**On your computer, rename your song "song.mp3". This is important, and I'm making you do this because I am too lazy to check the file's name.**
(If you don't have a "song.mp3" and want to use the original demo example, you can skip this and go to STEP THREE below.)
Press the `play button` to the left of the code section directly below, then select `Choose Files` and select your "song.mp3" on your computer. The file should be uploaded, then moved to the right place.
⬇ get your "song.mp3" file ready and press here
```
! rm -f song.mp3 # remove the song.mp3 if it exists
files.upload()
! echo "Moving song.mp3...."
! mv song.mp3 spleeter/audio_example.mp3
```
The following line will split the file into the two parts (voice and accompaniment), which will be output as "wav" files. (Don't worry, we'll convert them to mp3 before you download.)
⬇ Start Spleetin'!
```
! /usr/local/envs/spleeter-gpu/bin/spleeter separate -i spleeter/audio_example.mp3 -p spleeter:2stems -o output
```
### STEP THREE
That went fast, huh?
⬇ Now let's convert the output wav files to mp3.
```
from pydub import AudioSegment
sound = AudioSegment.from_wav("output/audio_example/vocals.wav")
sound.export("output/audio_example/vocals.mp3", format="mp3")
sound = AudioSegment.from_wav("output/audio_example/accompaniment.wav")
sound.export("output/audio_example/accompaniment.mp3", format="mp3")
```
### STEP FOUR
Let's preview just the vocals...
```
ipd.Audio('output/audio_example/vocals.mp3')
```
Now let's preview just the music...
```
ipd.Audio('output/audio_example/accompaniment.mp3')
```
Again, here is the original...
```
ipd.Audio('spleeter/audio_example.mp3')
```
### STEP FIVE
To download, press the ⋮ (three vertical dots) in the previews above and choose `Download`. Or, you can try the commands below. (Some users may experience "fetch errors" with them, however. Hopefully a fix will show up soon.)
```
files.download('output/audio_example/vocals.mp3')
```
Download the music below.
```
files.download('output/audio_example/accompaniment.mp3')
```
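If `files.download` keeps throwing fetch errors, one possible workaround (untested here, and assuming the standard `zip` utility is available on the Colab VM, which it normally is) is to bundle the whole output folder and download a single archive:
```
# zip up everything Spleeter produced and download it in one go
! zip -r spleeted.zip output
files.download('spleeted.zip')
```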
## THAT'S IT!
To start again, just upload a new "song.mp3" in STEP TWO above and re-do the subsequent steps.
If you liked this page, you can fork it and make changes. Don't forget to smash that Like button-- wait, there's no Like button. Well, subscribe to my channel. No channel either? Dammit. Well, umm.... you can follow me on Twitter I guess at the link below.
--[fat-tire](https://twitter.com/fat__tire)
Here's the citation Deezer requested. No idea how this is supposed to be formatted, but here it is raw:
```
@misc{spleeter2019,
title={Spleeter: A Fast And State-of-the Art Music Source Separation Tool With Pre-trained Models},
author={Romain Hennequin and Anis Khlif and Felix Voituret and Manuel Moussallam},
howpublished={Late-Breaking/Demo ISMIR 2019},
month={November},
note={Deezer Research},
year={2019}
}
```
As mentioned, you can save a copy [of this colab page](https://colab.research.google.com/github/fat-tire/SongSpleeterColab/blob/master/Song_Spleeter_Colab.ipynb) to modify yourself. Or, here's a [GitHub repository link](https://github.com/fat-tire/SongSpleeterColab). Pull requests are welcome!
Ideas: you might make a version that isolates the piano tracks or processes multiple songs at a time. Or maybe you give it a URL from YouTube (using ONLY songs that you own or are licensed to use, of course) and it splits them for you. A [mobile music app](https://www.tensorflow.org/lite/guide/android) that only plays karaoke singalong tracks would be cool... I'm sure you'll think of neat stuff to try.
Enjoy!
# Machine Learning Engineer Nanodegree
## Model Evaluation & Validation
## Project: Predicting Boston Housing Prices
Welcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
## Getting Started
In this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a *good fit* could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.
The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:
- 16 data points have an `'MEDV'` value of 50.0. These data points likely contain **missing or censored values** and have been removed.
- 1 data point has an `'RM'` value of 8.78. This data point can be considered an **outlier** and has been removed.
- The features `'RM'`, `'LSTAT'`, `'PTRATIO'`, and `'MEDV'` are essential. The remaining **non-relevant features** have been excluded.
- The feature `'MEDV'` has been **multiplicatively scaled** to account for 35 years of market inflation.
Run the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the Boston housing dataset
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis = 1)
# Success
print("Boston housing dataset has {} data points with {} variables each.".format(*data.shape))
```
## Data Exploration
In this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.
Since the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into **features** and the **target variable**. The **features**, `'RM'`, `'LSTAT'`, and `'PTRATIO'`, give us quantitative information about each data point. The **target variable**, `'MEDV'`, will be the variable we seek to predict. These are stored in `features` and `prices`, respectively.
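As a quick, optional peek at that split (not part of the graded template), the shapes and per-feature summary statistics can be inspected directly:
```
# 'features' and 'prices' were created in the data-loading cell above
print("features shape: {}, prices shape: {}".format(features.shape, prices.shape))
features.describe()
```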
### Implementation: Calculate Statistics
For your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since `numpy` has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.
In the code cell below, you will need to implement the following:
- Calculate the minimum, maximum, mean, median, and standard deviation of `'MEDV'`, which is stored in `prices`.
- Store each calculation in their respective variable.
```
# TODO: Minimum price of the data
minimum_price = np.min(prices)
# TODO: Maximum price of the data
maximum_price = np.max(prices)
# TODO: Mean price of the data
mean_price = np.mean(prices)
# TODO: Median price of the data
median_price = np.median(prices)
# TODO: Standard deviation of prices of the data
std_price = np.std(prices)
# Show the calculated statistics
print("Statistics for Boston housing dataset:\n")
print("Minimum price: ${}".format(minimum_price))
print("Maximum price: ${}".format(maximum_price))
print("Mean price: ${}".format(mean_price))
print("Median price ${}".format(median_price))
print("Standard deviation of prices: ${}".format(std_price))
```
### Question 1 - Feature Observation
As a reminder, we are using three features from the Boston housing dataset: `'RM'`, `'LSTAT'`, and `'PTRATIO'`. For each data point (neighborhood):
- `'RM'` is the average number of rooms among homes in the neighborhood.
- `'LSTAT'` is the percentage of homeowners in the neighborhood considered "lower class" (working poor).
- `'PTRATIO'` is the ratio of students to teachers in primary and secondary schools in the neighborhood.
**Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an *increase* in the value of `'MEDV'` or a *decrease* in the value of `'MEDV'`? Justify your answer for each.**
**Hint:** This problem can be phrased using examples like the ones below.
* Would you expect a home that has an `'RM'` value (number of rooms) of 6 to be worth more or less than a home that has an `'RM'` value of 7?
* Would you expect a neighborhood that has an `'LSTAT'` value (percent of lower-class workers) of 15 to have home prices worth more or less than a neighborhood that has an `'LSTAT'` value of 20?
* Would you expect a neighborhood that has a `'PTRATIO'` value (ratio of students to teachers) of 10 to have home prices worth more or less than a neighborhood that has a `'PTRATIO'` value of 15?
<span class="answer">**Answer:**
#### RM
- I think that an increase in the number of rooms will increase the value of `MEDV`, because larger houses should be more expensive. Hence the guess of a positive relationship between these two.
- But I don't think that this increase will be very large, because house prices within a neighborhood are often similar per square foot.
- In addition, I think that this will depend on the district of Boston. Especially in larger cities, a large house in one district might be cheaper than a smaller house in another district.
#### LSTAT
- My intuition is that the value of `MEDV` will decrease as the percentage of `LSTAT` increases.
- This is because I think that people considered lower class tend to buy cheaper houses, since they don't earn as much money.
- In addition, I think that these people are not able to borrow as much money to buy more expensive houses.
#### PTRATIO
- In my opinion, an increase in `PTRATIO` will decrease the value of `MEDV`.
- This is because a lower `PTRATIO` means fewer students per teacher. That often means such schools are more exclusive, that teachers can teach complex subjects more effectively, and that the school does not need to invest vast sums of money in learning material.
- Such schools will be located in neighborhoods with higher house prices.
</span>
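As an optional, hedged sanity check on these intuitions (not required by the template), the correlation of each feature with `'MEDV'` can be computed from the loaded data; if the intuitions are right, the sign should be positive for `'RM'` and negative for `'LSTAT'` and `'PTRATIO'`:
```
# Pearson correlation of each feature with the target variable
data.corr()['MEDV'].drop('MEDV')
```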
----
## Developing a Model
In this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.
### Implementation: Define a Performance Metric
It is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions.
The values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R<sup>2</sup> of 0 is no better than a model that always predicts the *mean* of the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the **features**. _A model can be given a negative R<sup>2</sup> as well, which indicates that the model is **arbitrarily worse** than one that always predicts the mean of the target variable._
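As a quick illustration (using the prediction table from Question 2 below), R<sup>2</sup> can be computed directly as 1 - SS<sub>res</sub>/SS<sub>tot</sub>, which matches `r2_score`:
```
# Quick illustration: R^2 = 1 - SS_res / SS_tot
import numpy as np
from sklearn.metrics import r2_score
y_true = np.array([3.0, -0.5, 2.0, 7.0, 4.2])
y_pred = np.array([2.5, 0.0, 2.1, 7.8, 5.3])
ss_res = np.sum((y_true - y_pred)**2)          # residual sum of squares
ss_tot = np.sum((y_true - y_true.mean())**2)   # total sum of squares
print(1 - ss_res/ss_tot, r2_score(y_true, y_pred))   # both ~0.923
```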
For the `performance_metric` function in the code cell below, you will need to implement the following:
- Use `r2_score` from `sklearn.metrics` to perform a performance calculation between `y_true` and `y_predict`.
- Assign the performance score to the `score` variable.
```
# TODO: Import 'r2_score'
from sklearn.metrics import r2_score
def performance_metric(y_true, y_predict):
""" Calculates and returns the performance score between
true and predicted values based on the metric chosen. """
# TODO: Calculate the performance score between 'y_true' and 'y_predict'
score = r2_score(y_true, y_predict)
# Return the score
return score
```
### Question 2 - Goodness of Fit
Assume that a dataset contains five data points and a model made the following predictions for the target variable:
| True Value | Prediction |
| :-------------: | :--------: |
| 3.0 | 2.5 |
| -0.5 | 0.0 |
| 2.0 | 2.1 |
| 7.0 | 7.8 |
| 4.2 | 5.3 |
Run the code cell below to use the `performance_metric` function and calculate this model's coefficient of determination.
```
# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print("Model has a coefficient of determination, R^2, of {:.3f}.".format(score))
```
* Would you consider this model to have successfully captured the variation of the target variable?
* Why or why not?
**Hint:** The R2 score is the proportion of the variance in the dependent variable that is predictable from the independent variable. In other words:
* R2 score of 0 means that the dependent variable cannot be predicted from the independent variable.
* R2 score of 1 means the dependent variable can be predicted from the independent variable.
* R2 score between 0 and 1 indicates the extent to which the dependent variable is predictable.
* An R2 score of 0.40 means that 40 percent of the variance in Y is predictable from X.
<span class="answer">**Answer:**
A score of *0.923* means that about *92%* of the variance of the target is explained by this model's predictions, as the score is close to 1.
However, since there are only five data points here, it is hard to conclude that this result is statistically significant.
</span>
### Implementation: Shuffle and Split Data
Your next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.
For the code cell below, you will need to implement the following:
- Use `train_test_split` from `sklearn.model_selection` to shuffle and split the `features` and `prices` data into training and testing sets.
- Split the data into 80% training and 20% testing.
- Set the `random_state` for `train_test_split` to a value of your choice. This ensures results are consistent.
- Assign the train and testing splits to `X_train`, `X_test`, `y_train`, and `y_test`.
```
# TODO: Import 'train_test_split'
from sklearn.model_selection import train_test_split
# TODO: Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = train_test_split(features, prices, shuffle=True,
test_size=0.2, random_state=4711)
# Success
print("Training and testing split was successful.")
```
### Question 3 - Training and Testing
* What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?
**Hint:** Think about how overfitting or underfitting is contingent upon how the data is split.
<span class="answer">**Answer:**
The main goal of machine learning is to predict new values based on existing data, that is, to learn *from the past*. We use existing data to train the algorithm, but afterwards we need to know whether our model *works* and is able to predict new values. Hence we need to test the model.
We cannot evaluate it on the same data we used for training, because the model may simply have memorized those examples; it would then predict them all correctly without telling us whether it is able to generalize.
The benefit of splitting a dataset into training and testing subsets is therefore that we can more easily test whether the model works and does not overfit the training data: we train the model on one part of the data and then evaluate it on the other, unseen part.
</span>
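As a small illustration of this point (not part of the required implementation), an unconstrained decision tree can essentially memorize the training data, so judging it on the training set alone would be misleading:
```
# Illustration only: an unconstrained tree memorizes the training data
from sklearn.tree import DecisionTreeRegressor
memorizer = DecisionTreeRegressor(random_state=0).fit(X_train, y_train)
print("R^2 on the training data:", performance_metric(y_train, memorizer.predict(X_train)))   # ~1.0
print("R^2 on the unseen test data:", performance_metric(y_test, memorizer.predict(X_test)))  # noticeably lower
```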
----
## Analyzing Model Performance
In this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `'max_depth'` parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone.
### Learning Curves
The following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination.
Run the code cell below and use these graphs to answer the following question.
```
# Produce learning curves for varying training set sizes and maximum depths
vs.ModelLearning(features, prices)
```
### Question 4 - Learning the Data
* Choose one of the graphs above and state the maximum depth for the model.
* What happens to the score of the training curve as more training points are added? What about the testing curve?
* Would having more training points benefit the model?
**Hint:** Are the learning curves converging to particular scores? Generally speaking, the more data you have, the better. But if your training and testing curves are converging with a score above your benchmark threshold, would this be necessary?
Think about the pros and cons of adding more training points based on if the training and testing curves are converging.
<span class="answer">**Answer:**
The graph with **max_depth = 10** shows an overfitting model.
This model quickly builds a decision tree **10** levels deep on the training data, so the *training score* stays high throughout. Adding more training points would probably lower this score only very slowly, since the score decreases only slightly between 50 and 400 points.
Evaluating this model on the *test data* shows that it does not generalize: the testing score rises over the first ~50 data points (I assume these are predicted mostly due to memorization), but then stays at about 0.5 or 0.6 and would remain there. Having more training points would therefore not benefit this model much.
</span>
### Complexity Curves
The following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the **learning curves**, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the `performance_metric` function.
**Run the code cell below and use this graph to answer the following two questions Q5 and Q6.**
```
vs.ModelComplexity(X_train, y_train)
```
### Question 5 - Bias-Variance Tradeoff
* When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance?
* How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?
**Hint:** High bias is a sign of underfitting(model is not complex enough to pick up the nuances in the data) and high variance is a sign of overfitting(model is by-hearting the data and cannot generalize well). Think about which model(depth 1 or 10) aligns with which part of the tradeoff.
<span class="answer">**Answer:**
- The model with a maximum depth of **1** suffers from high **bias**. This can be seen from both the *training* and the *validation* scores being low.
- The model with a maximum depth of **10** suffers from high **variance**. This can be seen from the two curves drifting apart: the model is too complex, so the validation score drops while the training score keeps increasing.
</span>
### Question 6 - Best-Guess Optimal Model
* Which maximum depth do you think results in a model that best generalizes to unseen data?
* What intuition lead you to this answer?
**Hint:** Look at the graph above Question 5 and see where the validation scores lie for the various depths that have been assigned to the model. Does it get better with increased depth? At what point do we get our best validation score without overcomplicating our model? And remember, Occams Razor states "Among competing hypotheses, the one with the fewest assumptions should be selected."
<span class="answer">**Answer:**
I think that the best model here is the one with a maximum depth of **4**.
This is because that model has the highest *validation score*, and its standard deviation is about as small as for **max_depth = 3**. I am a little unsure about this, but I assume the best choice is either **4** or **3**, because at *3* the variance is lower and the model is less complex.
Still, I would first start with a maximum depth of 4.
</span>
-----
## Evaluating Model Performance
In this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from `fit_model`.
### Question 7 - Grid Search
* What is the grid search technique?
* How it can be applied to optimize a learning algorithm?
**Hint:** When explaining the Grid Search technique, be sure to touch upon why it is used, what the 'grid' entails and what the end goal of this method is. To solidify your answer, you can also give an example of a parameter in a model that can be optimized using this approach.
<span class="answer">**Answer:**
Ultimately, a machine learning model is a very complex mathematical function: the features are its inputs and the result is the prediction. In our example the result is the predicted price of a house.
When a model is trained, it learns the relationship between the features and the prediction. What stays the same during the training process, however, is the chosen algorithm: the model does not switch from, say, an SVM to a decision tree.
Each algorithm has its own set of parameters that must be set manually, for example the maximum depth of a decision tree or the polynomial degree. These parameters are called hyper-parameters.
Because these parameters are so important, optimizing them is a necessary step in machine learning. One possible way to tune them is the manual approach of trying every promising (or possible) combination, but this would be a challenging and time-consuming process.
Hence an automated process should be used. One method is grid search. It works by splitting the dataset into three sets: a training set, a validation set and a test set. A grid is then defined over the hyper-parameters, and the model performance for each point on the grid is evaluated using the training and validation sets.
The goal is to find the hyper-parameters that maximize the model's score. In our example above it can be used to find the best depth at which to stop growing a decision tree before it gets too complex.
</span>
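A minimal sketch of this idea (for illustration only; the project itself uses scikit-learn's `GridSearchCV` further below) could look like this, holding out part of the training data as a validation set:
```
# Illustration only: a manual grid search over 'max_depth' using a held-out validation set
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
best_depth, best_score = None, -np.inf
for depth in range(1, 11):   # the "grid" over the hyper-parameter
    model = DecisionTreeRegressor(max_depth=depth, random_state=0).fit(X_tr, y_tr)
    score = performance_metric(y_val, model.predict(X_val))
    if score > best_score:
        best_depth, best_score = depth, score
print("Best 'max_depth' on the validation set:", best_depth)
```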
### Question 8 - Cross-Validation
* What is the k-fold cross-validation training technique?
* What benefit does this technique provide for grid search when optimizing a model?
**Hint:** When explaining the k-fold cross validation technique, be sure to touch upon what 'k' is, how the dataset is split into different parts for training and testing and the number of times it is run based on the 'k' value.
When thinking about how k-fold cross validation helps grid search, think about the main drawbacks of grid search which are hinged upon **using a particular subset of data for training or testing** and how k-fold cv could help alleviate that. You can refer to the [docs](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) for your answer.
<span class="answer">**Answer:**
The above mentioned grid search technique has two problems:
1. It further reduces the training set.
2. Model performance heavily depends on the particular choice of the sets. For example, the Iris flower dataset might be ordered in such a way that, if you simply use, say, the last 10% as the test set and the last 10% of the remaining training data as the validation set, you end up training on only one category of data.
The k-fold cross-validation technique can help reducing these problems. It is used as follows:
- The whole dataset is used and no split into training and test set is done.
- The dataset is randomly split (*and optionally shuffled*) into *k* subsamples of the same size.
- Then the model is trained *k* times. Each training is done on *k-1* subsamples and the remaining sample is used as test set.
That way every subsample is used exactly once as the test set and all observations are used for both training and testing. The final result is then the average of the *k* results.
For example, setting k = 3 results in 3 cross-validations. The data is split into set1, set2 and set3. Then the model is trained using
- set1 and set2 -> test on set3
- set2 and set3 -> test on set1
- set3 and set1 -> test on set2
These three results are then averaged.
In *sklearn*, k-fold cross-validation can be combined with grid search via the (aptly named) *cv* parameter. It can be set to an integer, which sklearn then uses as *k*. Older versions of sklearn default to *k=3*; since version 0.22, *k=5* is used if *cv* is not set when executing a grid search.
</span>
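As a short sketch of the k = 3 example above (illustration only, using the training data and a fixed-depth tree):
```
# Illustration only: 3-fold cross-validation of a fixed-depth decision tree
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
scores = cross_val_score(DecisionTreeRegressor(max_depth=4, random_state=0),
                         X_train, y_train,
                         scoring=make_scorer(performance_metric), cv=3)
print("3-fold R^2 scores:", scores, "average:", scores.mean())
```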
### Implementation: Fitting a Model
Your final implementation requires that you bring everything together and train a model using the **decision tree algorithm**. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the `'max_depth'` parameter for the decision tree. The `'max_depth'` parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called *supervised learning algorithms*.
In addition, you will find your implementation is using `ShuffleSplit()` for an alternative form of cross-validation (see the `'cv_sets'` variable). While it is not the K-Fold cross-validation technique you describe in **Question 8**, this type of cross-validation technique is just as useful! The `ShuffleSplit()` implementation below will create 10 (`'n_splits'`) shuffled sets, and for each shuffle, 20% (`'test_size'`) of the data will be used as the *validation set*. While you're working on your implementation, think about the contrasts and similarities it has to the K-fold cross-validation technique.
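To see the difference concretely, here is a small sketch (not part of the project code) contrasting the two on a toy array of 10 samples:
```
# Illustration only: KFold vs ShuffleSplit on a toy array
import numpy as np
from sklearn.model_selection import KFold, ShuffleSplit
X_toy = np.arange(10).reshape(-1, 1)
# KFold: every sample lands in the validation set exactly once
for _, val_idx in KFold(n_splits=5).split(X_toy):
    print("KFold validation indices:", val_idx)
# ShuffleSplit: each split draws a fresh random 20% validation set,
# so a sample may appear in several (or none) of the validation sets
for _, val_idx in ShuffleSplit(n_splits=3, test_size=0.2, random_state=0).split(X_toy):
    print("ShuffleSplit validation indices:", val_idx)
```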
For the `fit_model` function in the code cell below, you will need to implement the following:
- Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision tree regressor object.
- Assign this object to the `'regressor'` variable.
- Create a dictionary for `'max_depth'` with the values from 1 to 10, and assign this to the `'params'` variable.
- Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function object.
- Pass the `performance_metric` function as a parameter to the object.
- Assign this scoring function to the `'scoring_fnc'` variable.
- Use [`GridSearchCV`](http://scikit-learn.org/0.20/modules/generated/sklearn.model_selection.GridSearchCV.html) from `sklearn.model_selection` to create a grid search object.
- Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` as parameters to the object.
- Assign the `GridSearchCV` object to the `'grid'` variable.
```
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
def fit_model(X, y):
""" Performs grid search over the 'max_depth' parameter for a
decision tree regressor trained on the input data [X, y]. """
# Create cross-validation sets from the training data
cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)
# TODO: Create a decision tree regressor object
regressor = DecisionTreeRegressor()
# TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
params = {'max_depth': list(range(1, 11))}
# TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'
scoring_fnc = make_scorer(performance_metric)
# TODO: Create the grid search cv object --> GridSearchCV()
# Make sure to include the right parameters in the object:
# (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.
grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)
# Fit the grid search object to the data to compute the optimal model
grid = grid.fit(X, y)
# Return the optimal model after fitting the data
return grid.best_estimator_
```
### Making Predictions
Once a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. In the case of a *decision tree regressor*, the model has learned *what the best questions to ask about the input data are*, and can respond with a prediction for the **target variable**. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.
### Question 9 - Optimal Model
* What maximum depth does the optimal model have? How does this result compare to your guess in **Question 6**?
Run the code block below to fit the decision tree regressor to the training data and produce an optimal model.
```
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)
# Produce the value for 'max_depth'
print("Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth']))
```
**Hint:** The answer comes from the output of the code snippet above.
<span class="answer">**Answer:**
The optimal model is using `max_depth` of **4**.
This matches my guess in **Question 6**, although I was unsure whether 3 or 4 would be better.
</span>
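Optionally, we can also look at the "questions" the optimal tree asks at each split, assuming a scikit-learn version that provides `export_text` (0.21 or newer):
```
# Optional peek at the fitted tree's split rules (requires sklearn >= 0.21)
from sklearn.tree import export_text
print(export_text(reg, feature_names=list(features.columns)))
```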
### Question 10 - Predicting Selling Prices
Imagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:
| Feature | Client 1 | Client 2 | Client 3 |
| :---: | :---: | :---: | :---: |
| Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |
| Neighborhood poverty level (as %) | 17% | 32% | 3% |
| Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |
* What price would you recommend each client sell his/her home at?
* Do these prices seem reasonable given the values for the respective features?
**Hint:** Use the statistics you calculated in the **Data Exploration** section to help justify your response. Of the three clients, client 3 has the biggest house, in the best public school neighborhood with the lowest poverty level; while client 2 has the smallest house, in a neighborhood with a relatively high poverty rate and not the best public schools.
Run the code block below to have your optimized model make predictions for each client's home.
```
# Produce a matrix for client data
client_data = [[5, 17, 15], # Client 1
[4, 32, 22], # Client 2
[8, 3, 12]] # Client 3
# Show predictions
for i, price in enumerate(reg.predict(client_data)):
print("Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price))
```
<span class="answer">**Answer:**
Let's see the statistics again:
</span>
```
pd.options.display.float_format = '{:,.2f}'.format
data.describe()
```
<span class="answer">
- The mean price is **454,342.94 USD**.
- The median price is **438,900.00 USD**.
- The mean and median RM is **6** rooms.
- The mean LSTAT is **12.94%**.
- The median LSTAT is **11.69%**.
- The mean PTRATIO is **18.52**.
- The median PTRATIO is **19.10**.
- I suspected:
- Increased RM: Increased Price
- Increased LSTAT: Decreased Price
- Increased PTRATIO: Decreased Price
I choose the median price for comparison because the median is more robust to outliers.
**Client 1's home: 406,000.00 USD**
- This home has 5 rooms, 17% Neighborhood poverty level and a 15-to-1 PT ratio.
- Of the three clients, this home is in the middle in terms of the given features.
- This home has fewer rooms than the median -> this should decrease the price (but NOT that much).
- It has a higher LSTAT than the median -> this should decrease the price.
- The PTRATIO is lower than the median -> this will increase the price.
The price of **406,000 USD** seems reasonable to me, as it is slightly below the median price of **438,900.00 USD**.
**Client 2's home: 233,221.15 USD**
- This home has 4 rooms, 32% Neighborhood poverty level and a 22-to-1 PT ratio.
- Of the three clients, this home has the least favorable values for the given features.
- This home has fewer rooms than the median -> this should decrease the price (but NOT that much).
- It has a higher LSTAT than the median -> this should decrease the price.
- The PTRATIO is higher than the median -> this will decrease the price.
The price of **233,221.15 USD** again seems reasonable, as LSTAT and PTRATIO are above the 75% quantile whereas RM is below the 25% quantile. Hence the price should be lower than the 25% quantile of **MEDV**.
**Client 3's home: 960,540.00 USD**
- This home has 8 rooms, 3% poverty level and a 12-to-1 PT ratio.
- Of all three clients this home is located in the best neighborhood.
- It has more rooms than the 75% quantile.
- It has a lower LSTAT than the 25% quantile, nearly as low as the minimum.
- It has a PTRATIO at the minimum.
The price of **960,540.00 USD** is near the maximum. Of the three houses it should have the highest price, so this price seems reasonable to me.
</span>
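As an optional cross-check (not part of the project), we can express where each client's features fall within the dataset as percentiles:
```
# Optional cross-check: percentile of each client's features within the dataset
from scipy import stats
for i, client in enumerate(client_data):
    pct = [stats.percentileofscore(features[col], val)
           for col, val in zip(['RM', 'LSTAT', 'PTRATIO'], client)]
    print("Client {}: RM {:.0f}th, LSTAT {:.0f}th, PTRATIO {:.0f}th percentile".format(i + 1, *pct))
```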
### Sensitivity
An optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted.
**Run the code cell below to run the `fit_model` function ten times with different training and testing sets to see how the prediction for a specific client changes with respect to the data it's trained on.**
```
vs.PredictTrials(features, prices, fit_model, client_data)
```
### Question 11 - Applicability
* In a few sentences, discuss whether the constructed model should or should not be used in a real-world setting.
**Hint:** Take a look at the range in prices as calculated in the code snippet above. Some questions worth answering:
- How relevant today is data that was collected in 1978? How important is inflation?
- Are the features present in the data sufficient to describe a home? Do you think factors like the quality of appliances in the home, the square footage of the plot area, the presence of a pool, etc. should factor in?
- Is the model robust enough to make consistent predictions?
- Would data collected in an urban city like Boston be applicable in a rural city?
- Is it fair to judge the price of an individual home based on the characteristics of the entire neighborhood?
<span class="answer">**Answer:**
I would not use this model in a real-world setting. This is because:
- As seen above, the predictions vary over a range of about 73,357.39 USD across trials. The model is therefore not robust enough to make consistent predictions.
- House prices have certainly changed since 1978.
- Adjusting for inflation alone would not fix this, because real-estate prices do not simply follow inflation: they mostly increase over time and are highly volatile. Hence even a model based on data from, say, 4 years ago might not be useful today.
- In addition, the features present in the data are not sufficient to build a reliable model: even the original data has 14 available features, and it would be helpful to explore more than the *3* used here. For example, the square footage, the crime rate, the nitric oxide concentration or the index of accessibility to radial highways would certainly influence the prices.
- Even if this model could be used, it could only predict prices in Boston. Ideally it would also predict prices in cities that are similar to Boston.
- If there were data from a wide range of urban and rural cities, including many more features such as population, GDP, age and so on, we would be able to construct a model that predicts prices across different cities. But even such a model should be treated with caution and used very carefully.
</span>
```
import numpy as np
import pandas as pd
import scipy as sp
import sklearn as sl
import statistics
import matplotlib as mpl
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from pylab import*
from scipy import integrate
%matplotlib inline
```
## 1. Write the diffusion equation for the time $\tau$ in spherical coordinates and assume that $\tau(r,\theta,\phi)\simeq\tau(r)$, since by spherical symmetry it only depends on the radial position with respect to the center of the droplet (use **LaTeX**)
We have:
$$
\nabla^{2}\tau = -\frac{6\pi\eta a}{k_{B}T} \qquad (1)
$$
Therefore,
$$
\nabla^{2}\tau = \frac{1}{r^{2}}\frac{\partial}{\partial r}\left(r^{2}\frac{\partial\tau}{\partial r}\right) + \frac{1}{r^{2}\sin\theta}\frac{\partial}{\partial\theta}\left(\sin\theta\,\frac{\partial\tau}{\partial\theta}\right) + \frac{1}{r^{2}\sin^{2}\theta}\frac{\partial^{2}\tau}{\partial\varphi^{2}} \qquad (2)
$$
$$
\nabla^{2}\tau = \frac{1}{r^{2}}\frac{\partial}{\partial r}\left(r^{2}\frac{\partial\tau}{\partial r}\right) + 0 + 0 \qquad (3)
$$
$$
-\frac{6\pi\eta a}{k_{B}T} = \frac{1}{r^{2}}\frac{\partial}{\partial r}\left(r^{2}\frac{\partial\tau}{\partial r}\right) \qquad (4)
$$
$$
-\frac{6\pi\eta a}{k_{B}T} = \frac{1}{r^{2}}\left(2r\frac{\partial\tau}{\partial r} + r^{2}\frac{\partial^{2}\tau}{\partial r^{2}}\right) \qquad (5)
$$
$$
-\frac{6\pi\eta a}{k_{B}T} = \frac{\partial^{2}\tau}{\partial r^{2}} + \frac{2}{r}\frac{\partial\tau}{\partial r} \qquad (6)
$$
$$
0 = \frac{\partial^{2}\tau}{\partial r^{2}} + \frac{2}{r}\frac{\partial\tau}{\partial r} + \frac{6\pi\eta a}{k_{B}T} \qquad (7)
$$
## 2. Solve the differential equation for the time numerically and plot the result
Assume the following boundary conditions:
1. $\tau(R)=0$, since if the virion is at the surface the time must naturally be zero.
1. $\tau^\prime(0)=0$, since by symmetry the radial derivative must vanish at the origin.
Assume the following values:
- $R=5\mu m$ for the radius of the sphere of *quasi* water (compute the volume $V$)
- $\eta_{\text{H}_2\text{O}}\simeq1\times10^{-3}\,Pa\cdot s$ (Pascal seconds)
- $\frac{\eta}{\eta_{\text{H}_2\text{O}}}\approx10^3\to10^5$
- $a\simeq100\,nm$
- $V=\frac{4}{3}\pi a^3$
- $k_BT\simeq4.05\times10^{-21}J$
If:
$$
-\frac{6\pi\eta a}{k_{B}T} = \frac{1}{r^{2}}\frac{d}{dr}\left(r^{2}\frac{d\tau}{dr}\right)
$$
$$
\frac{1}{r^{2}}\left(2r\frac{\partial\tau}{\partial r} + r^{2}\frac{\partial^{2}\tau}{\partial r^{2}}\right) = -\frac{1}{D}
$$
$$
\frac{2}{r}\frac{\partial\tau}{\partial r} + \frac{\partial^{2}\tau}{\partial r^{2}} = -\frac{1}{D}
$$
$$
y'' + \frac{2}{x}y' + \frac{1}{D} = 0
$$
$$
\tau'' + \frac{2}{r}\tau' + \frac{1}{D} = 0
$$
$$
\tau_{h} = c_{1} + \frac{c_{2}}{r}, \qquad \tau_{p} = -\frac{r^{2}}{6D}
$$
$$
\tau = c_{1} + \frac{c_{2}}{r} - \frac{r^{2}}{6D}
$$
Applying the boundary conditions $\tau^\prime(0)=0$ (so $c_{2}=0$) and $\tau(R)=0$ (so $c_{1}=\frac{R^{2}}{6D}$) gives $\tau(r) = \frac{R^{2}-r^{2}}{6D}$, with $D = \frac{k_{B}T}{6\pi\eta a}$.
```
# Physical parameters
a = 1*(10**(-7))                      # virion radius: 100 nm
η = 1*10**(2)                         # viscosity: 10^5 times that of water (Pa·s)
R = 5*(10**(-6))                      # droplet radius: 5 µm
D = (4.05*10**(-21))/(6*np.pi*η*a)    # diffusion coefficient D = k_B T / (6 π η a)
# Finite-difference grid
N = 170
der = (R)/(N)                         # radial step size
cond0 = 0                             # right-hand side for the boundary row at r = 0 (τ' = 0)
condN = 0                             # right-hand side for the boundary row at r = R (τ = 0)
r = np.arange(0, R, der)
# Tridiagonal system for r τ'' + 2 τ' = -r/D:
# coefficients (r_i - Δ), -2 r_i, (r_i + Δ) for τ_{i-1}, τ_i, τ_{i+1}
fila = np.matrix([np.array([(r[i]-der if j == i-1 and i > 0 else 0)for j in range(N)])for i in range(N) ])
fila = fila + np.matrix([np.array([(-2*r[i] if j == i else 0)for j in range(N)])for i in range(N) ])
fila = fila + np.matrix([np.array([(r[i]+der if j == i+1 and i < N-1 else 0)for j in range(N)])for i in range(N) ])
# Boundary rows: τ_0 - τ_1 = 0 (zero derivative at the origin) and τ_{N-1} = 0 (surface)
fila[0,:] = np.zeros(N)
fila[0,0] = 1
fila[0,1] = -1
fila[-1, -1] = 1
# Right-hand side
c = (-r*(der**2)/D)
c[0] = cond0
c[-1] = condN
c = np.matrix(c).T
# Solve the linear system for τ on the grid
T = np.array((np.linalg.inv(fila)*c).T).flatten()
ra = np.arange(0,5,5/N)               # radial positions in µm for plotting
T
plt.plot(ra, T, 'g')
plt.xlabel('r (µm)')
plt.ylabel('τ(r) (s)')
```
## 3. If the virions are uniformly distributed, find the time it would take a virion to leave the aerosol droplet.
Keep in mind that you must average assuming that the virion has a uniform distribution, i.e. $\rho\left(\vec{r}\right)=1/V$, using the following relation,
$$
\bar{\tau} = \int_{\mathcal{V}}\tau\left(\vec{r}\right)\rho\left(\vec{r}\right)\,\text{d}\vec{r} = \frac{4\pi}{V}\int_{0}^{R}\tau(r)\,r^2\text{d}r.
$$
Perform the integral numerically.
$$
\bar{\tau} = \frac{4\pi}{V}\left(c_{1}\frac{R^{3}}{3} + c_{2}\frac{R^{2}}{2} - \frac{R^{5}}{30D}\right)
$$
With $c_{2}=0$ and $c_{1}=\frac{R^{2}}{6D}$ this evaluates to $\bar{\tau}=\frac{R^{2}}{15D}$, which can be used as a check on the numerical result below.
```
a = 1*(10**(-7))                          # virion radius
R = 5*(10**-6)                            # droplet radius
r = 0.000001                              # lower integration limit
V = (4/3)*(np.pi*(R**3))                  # droplet volume
n = 1000000                               # number of integration points
D = (4.05*10**(-17))/(6*np.pi*(100*(a)))
# Integrand (4π/V) τ(r) r², with τ(r) = (R² - r²)/(6D)
def t(r):
    return(4*np.pi/V)*((R**2/(6*D))-(1/(6*D)*r**2))*(r**2)
# Composite Simpson's rule on n points between r and R
def simpint(r,R,n,t):
    x, dex = np.linspace(r,R,n,retstep= True )
    return (dex/3)*(t(x[0])+2*np.sum(t(x[2:len(x)-1:2]))+ 4*np.sum(t(x[1::2]))+t(x[-1]))
valorfinal = simpint(r,R,n,t)
valorfinal
```
## 4. Markov chains.
We will now solve the previous problem using a Markov process. Suppose that you **divide** the sphere into small cubes of width $\delta x=\delta y=\delta z=\Delta=R/N$, with $N$ a chosen number of partitions. For our experiment, suppose we place a virion at an initial position $\vec{r}_0=(\Delta\,j, 0, 0)$, determined by an index $j\in\{0,1,2,\dots,N\}$. You will update the position of the virion on the discrete grid following the rules below (a sketch of this procedure is given right after the list):
- Determine the number of divisions $N$ and compute $\Delta$.
- Adjust the time scale $\delta t$ such that the probability $\alpha=D\frac{\delta t}{\Delta^2}<\frac{1}{6}$. (I recommend $\leq1/12$)
- Run a Markov-Monte Carlo simulation, updating the position with transition probability $\alpha$ towards each of the nearest neighbors, and count the number of time steps needed to reach the surface, i.e. $|\vec{r}(t_m)|>R-\Delta$
- Repeat this experiment for the same position a large number of times to obtain statistics (mean and standard deviation).
- Repeat all the steps for every index $j\in\{0,1,2,\dots,N\}$ and plot. Compare with the previous results!
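A minimal sketch of this random-walk procedure (an illustration with assumed values η = 100 Pa·s, N = 20 and α = 1/12; it is not the notebook's own implementation, which instead evolves occupation numbers on a 1D grid in the cell below):
```
# Illustration only: single-virion random walk on the cubic lattice
import numpy as np
R = 5e-6                                   # droplet radius (m)
a = 1e-7                                   # virion radius (m)
eta = 100                                  # viscosity (Pa·s), 10^5 times that of water
D = 4.05e-21 / (6 * np.pi * eta * a)       # diffusion coefficient
N = 20                                     # number of partitions
delta = R / N                              # lattice spacing Δ
alpha = 1 / 12                             # transition probability per neighbor
dt = alpha * delta**2 / D                  # time step so that α = D δt / Δ²
moves = np.array([[1,0,0],[-1,0,0],[0,1,0],[0,-1,0],[0,0,1],[0,0,-1]])
def steps_to_surface(j, rng):
    """Count the time steps until a virion starting at (jΔ, 0, 0) reaches the surface."""
    pos = np.array([j, 0, 0], dtype=float)  # position in units of Δ
    steps = 0
    while np.linalg.norm(pos) * delta <= R - delta:
        u = rng.random()
        if u < 6 * alpha:                   # move with probability 6α, otherwise stay
            pos += moves[int(u / alpha)]
        steps += 1
    return steps
rng = np.random.default_rng(0)
trials = [steps_to_surface(N // 2, rng) for _ in range(100)]
print("mean exit time:", np.mean(trials) * dt, "s, std:", np.std(trials) * dt, "s")
```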
```
L = 2
T = 4
N = 10
dex = L/N
x = np.arange(0,L,dex)
D = 1/12
dt = 1*10**(-4)
t = np.arange(0,T,dt)
alfa = D*dt/(dex*dex)   # α = D δt / Δ²
assert alfa < 1/12
alfa
def rec(n):
global alfa
vir = np.random.uniform(size=n)
left = sum(vir<alfa)
right = sum(vir<2*alfa) - left
return (left,right)
def nueva(psi):
    npsi = np.array([rec(_psi) for _psi in psi]).T  # rec() gives the (left, right) movers per cell
npsi[0,0] = 0
npsi[1,-1] = 0
npsileft = np.roll(npsi[0],-1)
npsiright = np.roll(npsi[1], 1)
npsi = np.sum(npsi, axis=0)
return psi+npsiright+npsileft-npsi
psi = np.zeros(N, dtype=np.uint32)
psi[0] = 25
psi[N//2] = 75
psi[N//3]= 100
plt.scatter(x, psi,color='m')
plt.xlabel('x')
plt.ylabel('y')
L = 2
T = 4
N = 20
dex = L/N
x = np.arange(0,L,dex)
D = 1/12
dt = 1*10**(-4)
t = np.arange(0,T,dt)
alfa = D*dt/(dex*dex)   # α = D δt / Δ²
assert alfa < 1/12
alfa
def rec(n):
global alfa
vir = np.random.uniform(size=n)
left = sum(vir<alfa)
right = sum(vir<2*alfa) - left
return (left,right)
def nueva(psi):
    npsi = np.array([rec(_psi) for _psi in psi]).T  # rec() gives the (left, right) movers per cell
npsi[0,0] = 0
npsi[1,-1] = 0
npsileft = np.roll(npsi[0],-1)
npsiright = np.roll(npsi[1], 1)
npsi = np.sum(npsi, axis=0)
return psi+npsiright+npsileft-npsi
psi = np.zeros(N, dtype=np.uint32)
psi[0] = 50
psi[N//2] = 150
psi[N//3]= 200
plt.scatter(x, psi,color='y')
plt.xlabel('x')
plt.ylabel('y')
```
## Cleaning training data - first passthrough
```
# Import pandas and numpy for data cleaning.
import pandas as pd
import numpy as np
# Load in the train .csv.
train = pd.read_csv('../data/train.csv')
# Convert all columns to lowercase and replace spaces in column names.
train.columns = [col.lower().replace(" ", "_") for col in train.columns]
# Print the shape and first 5 rows.
print(train.shape)
train.head()
# Check the info for nulls and dtypes.
train.info()
```
---
Columns with dtype float have values referring to a measurement of the feature.
For example, garage_cars refers to the number of cars that can fit in the garage, and lot_frontage refers to the length in feet of the property which is connected to the street.
For these columns, when the value is NaN, it means that the property does not have this feature, which means that the measurement is 0.
Hence, NaNs for the columns with dtype 'float' will be filled with 0 to indicate that the property has a measurement of 0 for this feature.
However, there is a column for garage_yr_blt which is also of dtype 'float'. In this case, 0 is not a sensible value, so this column will not be imputed. Instead, it will be dropped, as we already have a year-built column as well as other garage features.
```
train.drop('garage_yr_blt', axis = 1, inplace=True)
# Create a list of columns with dtype float
float_columns = list(train.select_dtypes(include = np.float64))
# Iterate through the list to fillna for these columns with 0
[train[col].fillna(0, inplace=True) for col in float_columns]
train.head()
```
---
Columns with dtype 'object' have values referring to the quality or type of the feature.
For example, mas_vnr_type refers to the masonry veneer type and alley refers to the type of alley access the property has.
For these columns, when the value is NaN, it means that the property does not have this feature.
Hence, NaNs for the columns with dtype 'object' will be filled with "None" to indicate that the property does not have this feature.
```
# Create a list of columns with dtype object
object_columns = list(train.select_dtypes(include = object))
# Iterate through the list to fillna for these columns with "None"
[train[col].fillna('None', inplace=True) for col in object_columns]
train.head()
# Convert ID and PID columns to object dtype as they should not be considered a feature.
# Also convert ms_subclass to object as this should not be considered numerical
train.id = train.id.astype(object)
train.pid = train.pid.astype(object)
train.ms_subclass = train.ms_subclass.astype(object)
# Check that there are no more NaNs
train.info()
# Rename columns for better clarity
col_names = {'street': 'street_type', 'alley': 'alley_type', 'condition_1': 'proximity_road_railroad_1',
'condition_2': 'proximity_road_railroad_2', 'exterior_1st': 'exterior_mat_1',
'exterior_2nd': 'exterior_mat_2', 'mas_vnr_type': 'masonry_veneer_type',
'mas_vnr_area': 'masonry_veneer_area', 'gr_liv_area': 'above_ground_living_area',
'totrms_abvgrd': 'total_rms_above_ground', 'fireplace_qu': 'fireplace_qual',
'3ssn_porch': 'three_season_porch', 'pool_qc': 'pool_qual', 'foundation': 'foundation_type',
'fence': 'fence_type', 'heating': 'heating_type', 'electrical': 'electrical_system',
'misc_val': 'misc_value','mo_sold': 'month_sold', 'yr_sold': 'year_sold'}
train.columns = [col_names.get(x, x) for x in train.columns]
# Save to new .csv
train.to_csv(index = False, path_or_buf = '../data/train_clean.csv')
```
## Cleaning testing data - first passthrough
For the test data, we will repeat the changes which we made in the training data.
```
# Repeat steps for test.csv
# Load in the train .csv.
test = pd.read_csv('../data/test.csv')
test.columns = [col.lower().replace(" ", "_") for col in test.columns]
test.drop('garage_yr_blt', axis=1, inplace=True)
# Create a list of columns with dtype float
float_columns = list(test.select_dtypes(include = np.float64))
# Iterate through the list to fillna for these columns with 0
[test[col].fillna(0, inplace=True) for col in float_columns];
# Create a list of columns with dtype object
object_columns = list(test.select_dtypes(include = object))
# Iterate through the list to fillna for these columns with "None"
[test[col].fillna('None', inplace=True) for col in object_columns];
# Convert ID and PID columns to object dtype as they should not be considered a feature.
# Also convert ms_subclass to object as this should not be considered numerical
test.id = test.id.astype(object)
test.pid = test.pid.astype(object)
test.ms_subclass = test.ms_subclass.astype(object)
# Rename columns for better clarity
col_names = {'street': 'street_type', 'alley': 'alley_type', 'condition_1': 'proximity_road_railroad_1',
'condition_2': 'proximity_road_railroad_2', 'exterior_1st': 'exterior_mat_1',
'exterior_2nd': 'exterior_mat_2', 'mas_vnr_type': 'masonry_veneer_type',
'mas_vnr_area': 'masonry_veneer_area', 'gr_liv_area': 'above_ground_living_area',
'totrms_abvgrd': 'total_rms_above_ground', 'fireplace_qu': 'fireplace_qual',
'3ssn_porch': 'three_season_porch', 'pool_qc': 'pool_qual', 'foundation': 'foundation_type',
'fence': 'fence_type', 'heating': 'heating_type', 'electrical': 'electrical_system',
'misc_val': 'misc_value','mo_sold': 'month_sold', 'yr_sold': 'year_sold'}
test.columns = [col_names.get(x, x) for x in test.columns]
# Save to new .csv
test.to_csv(index = False, path_or_buf = '../data/test_clean.csv')
test.head()
```
## Adjusting training and test data - second passthrough
This time we will try dummifying the categorical columns.
We align the columns of the train and test datasets using the dataset with more columns to ensure consistency in the shape.
To check that this has solved the issue, we compare the shapes.
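As a small aside (on toy frames, not the project data), this is what `DataFrame.align` with `join='left'` on `axis=1` does: the second frame is reindexed to the first frame's columns, missing columns are filled with NaN, and extra columns are dropped.
```
# Toy illustration of DataFrame.align(join='left', axis=1)
left = pd.DataFrame({'a': [1], 'b': [2]})
right = pd.DataFrame({'a': [3], 'c': [4]})
left_aligned, right_aligned = left.align(right, join='left', axis=1)
print(right_aligned.columns.tolist())   # ['a', 'b'] -- 'b' is NaN, 'c' was dropped
```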
```
# Create a copy of the train dataset
train2 = train.copy()
# Select all the object columns (except id and pid) in a list
object_columns = list(train2.select_dtypes(include = object))
object_columns.remove('id')
object_columns.remove('pid')
# Dummify all object columns
train2 = pd.get_dummies(columns = object_columns, data = train2).astype(np.int64)
train2.shape
# Convert ID and PID columns to object dtype as they should not be considered a feature.
train2.id = train2.id.astype(object)
train2.pid = train2.pid.astype(object)
# Save to new .csv
train2.to_csv(index = False, path_or_buf = '../data/train2_clean.csv')
# Create a copy of the test dataset
test2 = test.copy()
# Select all the object columns (except id and pid) in a list
object_columns = list(test2.select_dtypes(include = object))
object_columns.remove('id')
object_columns.remove('pid')
# Dummify all object columns
test2 = pd.get_dummies(columns = object_columns, data = test2).astype(np.int64)
test2.shape
# Align columns with training set
train2, test2 = train2.align(test2, join='left', axis=1)
# Fill the new nulls with 0 for the dummified columns
test2.fillna(0, inplace=True)
test2.drop('saleprice',axis=1,inplace=True)
# Compare the shapes - test2 should have 1 less column for saleprice
print(test2.shape)
print(train2.shape)
# Convert ID and PID columns to object dtype as they should not be considered a feature.
test2.id = test2.id.astype(object)
test2.pid = test2.pid.astype(object)
# Save to new .csv
test2.to_csv(index = False, path_or_buf = '../data/test2_clean.csv')
```
## Adjusting training and test data - third passthrough
Instead of dummifying the categorical columns, which did not work too well in the modelling process, we will try replacing the strings with numbers according to their type.
There are two types of categorical columns in the dataset: ordinal, where there is an order/scale, and nominal, where it is unordered.
For the nominal columns, the order does not matter, and dummifying it may be a better choice. However, to avoid overfitting the data, we will reduce the number of features, and will not use the nominal columns.
For the ordinal columns, the order does matter, and hence we could give them numbers according to their scale.
```
# Make a copy of the original dataframes
train3 = train.copy()
test3 = test.copy()
# Create a list of columns with object dtype, excluding id and pid
object_columns = list(test3.select_dtypes(include = object))
object_columns.remove('id')
object_columns.remove('pid')
# Create a list of ordinal_columns by referring to the Ames data dictionary
ordinal_columns = ['lot_shape','utilities','land_slope','exter_qual','exter_cond','bsmt_qual',
'bsmt_cond','bsmt_exposure','bsmtfin_type_1','bsmtfin_type_2','heating_qc',
'electrical_system','kitchen_qual','functional','fireplace_qual', 'garage_finish',
'garage_qual','garage_cond','paved_drive','pool_qual','fence_type']
# Create a dictionary for mapping the ordinal values, according to the Ames data dictionary.
ordinal_dict = {'None':0,'NA':0,'MnWw':1,'GdWo':2,'MnPrv':3,'GdPrv':4,'Po':1,'Fa':2,'TA':3,'Gd':4,'Ex':5,
'N':0,'P':1,'Y':2,'Unf':1,'RFn':2,'Fin':3,'Sal':0,'Sev':1,'Maj2':2,'Maj1':3,'Mod':4,'Min2':5,
'Min1':6,'Typ':7,'Mix':1,'FuseP':2,'FuseF':3,'FuseA':4,'SBrkr':5,'LwQ':2,'Rec':3,'BLQ':4,
'ALQ':5,'GLQ':6,'No':1,'Mn':2,'Av':3,'Gtl':7,'ELO':1,'NoSeWa':2,'NoSewr':3,'AllPub':4,
'IR3':1,'IR2':2,'IR1':3,'Reg':4}
# Map ordinal_dict to columns in train3 if they are ordinal columns
for col in train3:
if col in ordinal_columns:
train3[col] = train3[col].map(ordinal_dict)
# Repeat map for test3
for col in test3:
if col in ordinal_columns:
test3[col] = test3[col].map(ordinal_dict)
# Convert ID and PID columns to object dtype as they should not be considered a feature.
# Also convert ms_subclass to object as this should not be considered numerical
train3.id = train3.id.astype(object)
train3.pid = train3.pid.astype(object)
test3.id = test3.id.astype(object)
test3.pid = test3.pid.astype(object)
# Save them to .csv
train3.to_csv(index = False, path_or_buf = '../data/train3_clean.csv')
test3.to_csv(index = False, path_or_buf = '../data/test3_clean.csv')
```
|
github_jupyter
|
# Import pandas and numpy for data cleaning.
import pandas as pd
import numpy as np
# Load in the train .csv.
train = pd.read_csv('../data/train.csv')
# Convert all columns to lowercase and replace spaces in column names.
train.columns = [col.lower().replace(" ", "_") for col in train.columns]
# Print the shape and first 5 rows.
print(train.shape)
train.head()
# Check the info for nulls and dtypes.
train.info()
train.drop('garage_yr_blt', axis = 1, inplace=True)
# Create a list of columns with dtype float
float_columns = list(train.select_dtypes(include = np.float64))
# Iterate through the list to fillna for these columns with 0
[train[col].fillna(0, inplace=True) for col in float_columns]
train.head()
# Create a list of columns with dtype object
object_columns = list(train.select_dtypes(include = object))
# Iterate through the list to fillna for these columns with "None"
[train[col].fillna('None', inplace=True) for col in object_columns]
train.head()
# Convert ID and PID columns to object dtype as they should not be considered a feature.
# Also convert ms_subclass to object as this should not be considered numerical
train.id = train.id.astype(object)
train.pid = train.pid.astype(object)
train.ms_subclass = train.ms_subclass.astype(object)
# Check that there are no more NaNs
train.info()
# Rename columns for better clarity
col_names = {'street': 'street_type', 'alley': 'alley_type', 'condition_1': 'proximity_road_railroad_1',
'condition_2': 'proximity_road_railroad_2', 'exterior_1st': 'exterior_mat_1',
'exterior_2nd': 'exterior_mat_2', 'mas_vnr_type': 'masonry_veneer_type',
'mas_vnr_area': 'masonry_veneer_area', 'gr_liv_area': 'above_ground_living_area',
'totrms_abvgrd': 'total_rms_above_ground', 'fireplace_qu': 'fireplace_qual',
'3ssn_porch': 'three_season_porch', 'pool_qc': 'pool_qual', 'foundation': 'foundation_type',
'fence': 'fence_type', 'heating': 'heating_type', 'electrical': 'electrical_system',
'misc_val': 'misc_value','mo_sold': 'month_sold', 'yr_sold': 'year_sold'}
train.columns = [col_names.get(x, x) for x in train.columns]
# Save to new .csv
train.to_csv(index = False, path_or_buf = '../data/train_clean.csv')
# Repeat steps for test.csv
# Load in the train .csv.
test = pd.read_csv('../data/test.csv')
test.columns = [col.lower().replace(" ", "_") for col in test.columns]
test.drop('garage_yr_blt', axis=1, inplace=True)
# Create a list of columns with dtype float
float_columns = list(test.select_dtypes(include = np.float64))
# Iterate through the list to fillna for these columns with 0
[test[col].fillna(0, inplace=True) for col in float_columns];
# Create a list of columns with dtype object
object_columns = list(test.select_dtypes(include = object))
# Iterate through the list to fillna for these columns with "None"
[test[col].fillna('None', inplace=True) for col in object_columns];
# Convert ID and PID columns to object dtype as they should not be considered a feature.
# Also convert ms_subclass to object as this should not be considered numerical
test.id = test.id.astype(object)
test.pid = test.pid.astype(object)
test.ms_subclass = test.ms_subclass.astype(object)
# Rename columns for better clarity
col_names = {'street': 'street_type', 'alley': 'alley_type', 'condition_1': 'proximity_road_railroad_1',
'condition_2': 'proximity_road_railroad_2', 'exterior_1st': 'exterior_mat_1',
'exterior_2nd': 'exterior_mat_2', 'mas_vnr_type': 'masonry_veneer_type',
'mas_vnr_area': 'masonry_veneer_area', 'gr_liv_area': 'above_ground_living_area',
'totrms_abvgrd': 'total_rms_above_ground', 'fireplace_qu': 'fireplace_qual',
'3ssn_porch': 'three_season_porch', 'pool_qc': 'pool_qual', 'foundation': 'foundation_type',
'fence': 'fence_type', 'heating': 'heating_type', 'electrical': 'electrical_system',
'misc_val': 'misc_value','mo_sold': 'month_sold', 'yr_sold': 'year_sold'}
test.columns = [col_names.get(x, x) for x in test.columns]
# Save to new .csv
test.to_csv(index = False, path_or_buf = '../data/test_clean.csv')
test.head()
# Create a copy of the train dataset
train2 = train.copy()
# Select all the object columns (except id and pid) in a list
object_columns = list(train2.select_dtypes(include = object))
object_columns.remove('id')
object_columns.remove('pid')
# Dummify all object columns
train2 = pd.get_dummies(columns = object_columns, data = train2).astype(np.int64)
train2.shape
# Convert ID and PID columns to object dtype as they should not be considered a feature.
train2.id = train2.id.astype(object)
train2.pid = train2.pid.astype(object)
# Save to new .csv
train2.to_csv(index = False, path_or_buf = '../data/train2_clean.csv')
# Create a copy of the test dataset
test2 = test.copy()
# Select all the object columns (except id and pid) in a list
object_columns = list(test2.select_dtypes(include = object))
object_columns.remove('id')
object_columns.remove('pid')
# Dummify all object columns
test2 = pd.get_dummies(columns = object_columns, data = test2).astype(np.int64)
test2.shape
# Align columns with training set
train2, test2 = train2.align(test2, join='left', axis=1)
# Fill the new nulls with 0 for the dummified columns
test2.fillna(0, inplace=True)
test2.drop('saleprice',axis=1,inplace=True)
# Compare the shapes - test2 should have 1 less column for saleprice
print(test2.shape)
print(train2.shape)
# Convert ID and PID columns to object dtype as they should not be considered a feature.
test2.id = test2.id.astype(object)
test2.pid = test2.pid.astype(object)
# Save to new .csv
test2.to_csv(index = False, path_or_buf = '../data/test2_clean.csv')
# Make a copy of the original dataframes
train3 = train.copy()
test3 = test.copy()
# Create a list of columns with object dtype, excluding id and pid
object_columns = list(test3.select_dtypes(include = object))
object_columns.remove('id')
object_columns.remove('pid')
# Create a list of ordinal_columns by referring to the Ames data dictionary
ordinal_columns = ['lot_shape','utilities','land_slope','exter_qual','exter_cond','bsmt_qual',
'bsmt_cond','bsmt_exposure','bsmtfin_type_1','bsmtfin_type_2','heating_qc',
'electrical_system','kitchen_qual','functional','fireplace_qual', 'garage_finish',
'garage_qual','garage_cond','paved_drive','pool_qual','fence_type']
# Create a dictionary for mapping the ordinal values, according to the Ames data dictionary.
ordinal_dict = {'None':0,'NA':0,'MnWw':1,'GdWo':2,'MnPrv':3,'GdPrv':4,'Po':1,'Fa':2,'TA':3,'Gd':4,'Ex':5,
'N':0,'P':1,'Y':2,'Unf':1,'RFn':2,'Fin':3,'Sal':0,'Sev':1,'Maj2':2,'Maj1':3,'Mod':4,'Min2':5,
'Min1':6,'Typ':7,'Mix':1,'FuseP':2,'FuseF':3,'FuseA':4,'SBrkr':5,'LwQ':2,'Rec':3,'BLQ':4,
'ALQ':5,'GLQ':6,'No':1,'Mn':2,'Av':3,'Gtl':7,'ELO':1,'NoSeWa':2,'NoSewr':3,'AllPub':4,
'IR3':1,'IR2':2,'IR1':3,'Reg':4}
# Map ordinal_dict to columns in train3 if they are ordinal columns
for col in train3:
if col in ordinal_columns:
train3[col] = train3[col].map(ordinal_dict)
# Repeat map for test3
for col in test3:
if col in ordinal_columns:
test3[col] = test3[col].map(ordinal_dict)
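# (Added sketch, not in the original notebook) Sanity check on the ordinal mapping above:
# any category not covered by ordinal_dict becomes NaN after .map(), so counting nulls per
# ordinal column makes gaps in the dictionary visible.
print(train3[ordinal_columns].isnull().sum().sum(), 'unmapped ordinal values in train3')
print(test3[ordinal_columns].isnull().sum().sum(), 'unmapped ordinal values in test3')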
# Convert ID and PID columns to object dtype as they should not be considered a feature.
# Also convert ms_subclass to object as this should not be considered numerical
train3.id = train3.id.astype(object)
train3.pid = train3.pid.astype(object)
test3.id = test3.id.astype(object)
test3.pid = test3.pid.astype(object)
# Save them to .csv
train3.to_csv(index = False, path_or_buf = '../data/train3_clean.csv')
test3.to_csv(index = False, path_or_buf = '../data/test3_clean.csv')
# 1-4.2 Intro Python
## Conditionals
- **`if`, `else`, `pass`**
- Conditionals using Boolean String Methods
- **Comparison operators**
- String comparisons
-----
><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
- **control code flow with `if`... `else` conditional logic**
- using Boolean string methods (`.isupper(), .isalpha(), .startswith()...`)
- **using comparison (`>, <, >=, <=, ==, !=`)**
- using Strings in comparisons
#
<font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font>
## comparison operators
- **`>`**
- **`<`**
- **`>=`**, **`<=`**
- **`==`**
- Assign **`=`** vs compare **`==`**
- **`!=`**
[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/cf192500-3879-4228-bd50-70dd3f38d831/Unit1_Section4.2-comparison-operators.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/cf192500-3879-4228-bd50-70dd3f38d831/Unit1_Section4.2-comparison-operators.vtt","srclang":"en","kind":"subtitles","label":"english"}])
#
<font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>
```
# [ ] review and run code to see if 3 greater than 5
3 > 5
# [ ] review and run code to see if 3 less than or equal to 5
3 <= 5
# [ ] review and run code
# assign x equal to 3
x = 3
# test if x is equal to 9
x == 9
# [ ] review and run code
x = 3
print("x not equal 9 is", x != 9)
print("x equal 3 is", x == 3)
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 1</B></font>
## comparison operators
```
x = 9 + 4
# [ ] create a test to print() True or False for x is equal to 13
print(x==13)
# [ ] create a test to print True or False for 3 + 3 is greater than 2 + 4
print(3+3>2+4)
```
#
<font size="6" color="#00A0B2" face="verdana"> <B>Concepts</B></font>
## Conditionals: comparison operators with `if`
[]( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/15e3b015-e83b-4ab8-8375-b11e52a348ea/Unit1_Section4.2-conditional-if.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/15e3b015-e83b-4ab8-8375-b11e52a348ea/Unit1_Section4.2-conditional-if.vtt","srclang":"en","kind":"subtitles","label":"english"}])
Comparison operators evaluate to Boolean **`True`** and **`False`** to direct the flow of **`if`** conditionals
###
<font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>
```
# review code and run cell
x = 21
if x > 25:
print("x is already bigger than 25")
else:
print("x was", x)
x = 25
print("now x is", x)
# review code and run cell
x = 18
if x + 18 == x + x:
print("Pass: x + 18 is equal to", x + x)
else:
print("Fail: x + 18 is not equal to", x + x)
# review code and run cell. "!" means "not"
x = 18
test_value = 18
if x != test_value:
print('x is not', test_value)
else:
print('x is', test_value)
# review code and run cell
# DON'T ASSIGN (x = 2) when you mean to COMPARE (x == 2)
x = 2
if x == 2:
print('"==" tests for, is equal to')
else:
pass
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 2</B></font>
## Evaluating a comparison operator in `if`
```
# [ ] create an if/else statement that tests if y is greater than or equal x + x
# [ ] print output: "y greater than or equal x + x is" True/False ...or a similar output
x = 3
y = x + 8
if y>=x+x:
print("y greater than or equal x+x is True")
else:
print("y greater than or equal x+x is False")
```
[Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) © 2017 Microsoft
# Working with Datasets
In the previous labs, you used a *datastore* to provide centralized, cloud-based data access. In this lab, you'll explore *datasets*, a further abstraction that makes it easier to work with specific data for experiments and training.
## Connect to Your Workspace
The first thing you need to do is to connect to your workspace using the Azure ML SDK.
> **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate.
```
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
```
## Prepare Data
In the previous lab, you created a datastore. Datasets are usually (though not always) based on data in datastores.
If you did not complete the previous lab, run the following code to upload two local CSV files to the default datastore in your workspace (if you *did* complete the previous lab, this will just overwrite the same files).
```
ws.get_default_datastore().upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
target_path='diabetes-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
```
## Create a Tabular Dataset
A dataset is an object that encapsulates a specific data source. Let's create a dataset from the diabetes data you uploaded to the datastore, and view the first 20 records. In this case, the data is in a structured format in a CSV file, so we'll use a *Tabular* dataset.
```
from azureml.core import Dataset
# Get the default datastore
default_ds = ws.get_default_datastore()
#Create a tabular dataset from the path on the datastore (this may take a short while)
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Display the first 20 rows as a Pandas dataframe
tab_data_set.take(20).to_pandas_dataframe()
```
As you can see in the code above, it's easy to convert a tabular dataset to a Pandas dataframe, enabling you to work with the data using common python techniques.
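For example, once the dataset has been converted you can explore it with ordinary pandas calls. The short snippet below is an illustrative addition (not one of the lab steps); the **Diabetic** column name is assumed from the diabetes data used later in this exercise.

```python
# Convert the tabular dataset to a pandas dataframe and explore it with standard pandas operations
df = tab_data_set.to_pandas_dataframe()

print(df.shape)                        # number of rows and columns
print(df.describe())                   # summary statistics for the numeric columns
print(df['Diabetic'].value_counts())   # class balance (column name assumed from this lab's data)
```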
## Create a File Dataset
The dataset you created is a *tabular* dataset that can be read as a dataframe containing all of the data in the structured files that are included in the dataset definition. This works well for tabular data, but in some machine learning scenarios you might need to work with data that is unstructured; or you may simply want to handle reading the data from files in your own code. To accomplish this, you can use a *file* dataset, which creates a list of file paths in a virtual mount point, which you can use to read the data in the files.
```
#Create a file dataset from the path on the datastore (this may take a short while)
file_data_set = Dataset.File.from_files(path=(default_ds, 'diabetes-data/*.csv'))
# Get the files in the dataset
for file_path in file_data_set.to_path():
print(file_path)
```
## Register Datasets
Now that you have created datasets that reference the diabetes data, you can register them to make them easily accessible to any experiment being run in the workspace.
We'll register the tabular dataset as **diabetes dataset**, and the file dataset as **diabetes file dataset**.
```
# Register the tabular dataset
try:
tab_data_set = tab_data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
except Exception as ex:
print(ex)
# Register the file dataset
try:
file_data_set = file_data_set.register(workspace=ws,
name='diabetes file dataset',
description='diabetes files',
tags = {'format':'CSV'},
create_new_version=True)
except Exception as ex:
print(ex)
print('Datasets registered')
```
You can view and manage datasets on the **Datasets** page for your workspace in [Azure ML Studio](https://ml.azure.com). You can also get a list of datasets from the workspace object:
```
print("Datasets:")
for dataset_name in list(ws.datasets.keys()):
dataset = Dataset.get_by_name(ws, dataset_name)
print("\t", dataset.name, 'version', dataset.version)
```
If you completed Labs 2A and 2B, you will see that registered datasets include transformations created using the visual Designer tool. You may also notice that in registering **diabetes dataset** with the same name as the dataset you created using the *Studio* interface in a previous exercise, you are creating a new *version* of the dataset. The ability to version datasets enables you to redefine datasets without breaking existing experiments or pipelines that rely on previous definitions. By default, the latest version of a named dataset is returned, but you can retrieve a specific version of a dataset by specifying the version number, like this:
```python
dataset_v1 = Dataset.get_by_name(ws, 'diabetes dataset', version = 1)
```
## Train a Model from a Tabular Dataset
Now that you have datasets, you're ready to start training models from them. You can pass datasets to scripts as *inputs* in the estimator being used to run the script.
Run the following two code cells to create:
1. A folder named **diabetes_training_from_tab_dataset**
2. A script that trains a classification model by using a tabular dataset that is passed to it as an *input*.
```
import os
# Create a folder for the experiment files
experiment_folder = 'diabetes_training_from_tab_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import argparse
from azureml.core import Run
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# Set regularization hyperparameter (passed as an argument to the script)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
args = parser.parse_args()
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the diabetes data (passed as an input dataset)
print("Loading Data...")
diabetes = run.input_datasets['diabetes'].to_pandas_dataframe()
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
Now you can create an estimator to run the script, and define a named *input* for the training dataset, which is read by the script.
> **Note**: The **Dataset** class is defined in the **azureml-dataprep** package (which is installed with the SDK), and this package includes optional support for **pandas** (which is used by the **to_pandas_dataframe()** method), so you need to include this package in the environment where the training experiment will be run.
```
from azureml.train.sklearn import SKLearn
from azureml.core import Experiment
from azureml.widgets import RunDetails
# Set the script parameters
script_params = {
'--regularization': 0.1
}
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")
# Create an estimator
estimator = SKLearn(source_directory=experiment_folder,
entry_script='diabetes_training.py',
script_params=script_params,
compute_target = 'local',
inputs=[diabetes_ds.as_named_input('diabetes')], # Pass the Dataset object as an input...
pip_packages=['azureml-dataprep[pandas]'] # ...so you need the dataprep package
)
# Create an experiment
experiment_name = 'diabetes-training'
experiment = Experiment(workspace = ws, name = experiment_name)
# Run the experiment
run = experiment.submit(config=estimator)
# Show the run details while running
RunDetails(run).show()
run.wait_for_completion()
```
The first time the experiment is run, it may take some time to set up the Python environment - subsequent runs will be quicker.
When the experiment has completed, in the widget, view the **azureml-logs/70_driver_log.txt** output log and the metrics generated by the run.
As with all experiments, you can view the details of the experiment run in [Azure ML Studio](https://ml.azure.com), and you can write code to retrieve the metrics and files generated:
```
# Get logged metrics
metrics = run.get_metrics()
for key in metrics.keys():
print(key, metrics.get(key))
print('\n')
for file in run.get_file_names():
print(file)
```
The model we trained is saved as the **diabetes_model.pkl** file in the **outputs** folder, so you can register it.
```
from azureml.core import Model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'SKLearn Estimator (tabular dataset)'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
## Train a Model from a File Dataset
You've seen how to train a model using training data in a *tabular* dataset; but what about a *file* dataset?
When you're using a file dataset, the dataset input passed to the script represents a mount point containing file paths. How you read the data from these files depends on the kind of data in the files and what you want to do with it. In the case of the diabetes CSV files, you can use the Python **glob** module to create a list of files in the virtual mount point defined by the dataset, and read them all into Pandas dataframes that are concatenated into a single dataframe.
Run the following two code cells to create:
1. A folder named **diabetes_training_from_file_dataset**
2. A script that trains a classification model by using a file dataset that is passed to it as an *input*.
```
import os
# Create a folder for the experiment files
experiment_folder = 'diabetes_training_from_file_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import argparse
from azureml.core import Workspace, Dataset, Experiment, Run
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import glob
# Set regularization hyperparameter (passed as an argument to the script)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
args = parser.parse_args()
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
print("Loading Data...")
data_path = run.input_datasets['diabetes'] # Get the training data from the estimator input
all_files = glob.glob(data_path + "/*.csv")
diabetes = pd.concat((pd.read_csv(f) for f in all_files))
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
Next we need to change the way we pass the dataset to the estimator - it needs to define a mount point from which the script can read the files. For large volumes of data, you'd generally use the **as_mount** method to stream the files directly from the dataset source; but when running on local compute (as we are in this example), you need to use the **as_download** option to download the dataset files to a local folder.
Also, since the **Dataset** class is defined in the **azureml-dataprep** package, we need to include that in the experiment environment.
```
from azureml.train.sklearn import SKLearn
from azureml.core import Experiment
from azureml.widgets import RunDetails
# Set the script parameters
script_params = {
'--regularization': 0.1
}
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes file dataset")
# Create an estimator
estimator = SKLearn(source_directory=experiment_folder,
entry_script='diabetes_training.py',
script_params=script_params,
compute_target = 'local',
inputs=[diabetes_ds.as_named_input('diabetes').as_download(path_on_compute='diabetes_data')], # Pass the Dataset object as an input
pip_packages=['azureml-dataprep[pandas]'] # so we need the dataprep package
)
# Create an experiment
experiment_name = 'diabetes-training'
experiment = Experiment(workspace = ws, name = experiment_name)
# Run the experiment
run = experiment.submit(config=estimator)
# Show the run details while running
RunDetails(run).show()
run.wait_for_completion()
```
When the experiment has completed, in the widget, view the **azureml-logs/70_driver_log.txt** output log to verify that the file dataset was processed and the data files downloaded.
As with all experiments, you can view the details of the experiment run in [Azure ML Studio](https://ml.azure.com), and you can write code to retrieve the metrics and files generated:
```
# Get logged metrics
metrics = run.get_metrics()
for key in metrics.keys():
print(key, metrics.get(key))
print('\n')
for file in run.get_file_names():
print(file)
```
Once again, let's register the model that we trained.
```
from azureml.core import Model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'SKLearn Estimator (file dataset)'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
> **More Information**: For more information about training with datasets, see [Training with Datasets](https://docs.microsoft.com/azure/machine-learning/how-to-train-with-datasets) in the Azure ML documentation.
# <a href="https://colab.research.google.com/github/tvml/ml2021/blob/main/codici/loss.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy as scipy
import scipy.special as sp
import pandas as pd
import urllib.request
colors = ["xkcd:dusty blue", "xkcd:dark peach", "xkcd:dark seafoam green",
"xkcd:dusty purple","xkcd:watermelon", "xkcd:dusky blue", "xkcd:amber",
"xkcd:purplish", "xkcd:dark teal", "xkcd:orange", "xkcd:slate"]
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['figure.figsize'] = (16, 8)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 8
filepath = "../dataset/"
url = "https://tvml.github.io/ml2021/dataset/"
def get_file(filename,local):
if local:
return filepath+filename
else:
urllib.request.urlretrieve (url+filename, filename)
return filename
def plot_ds(data,m=None,q=None):
fig = plt.figure(figsize=(16,8))
minx, maxx = min(data.x1), max(data.x1)
deltax = .1*(maxx-minx)
x = np.linspace(minx-deltax,maxx+deltax,1000)
ax = fig.gca()
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, edgecolor='k', alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, edgecolor='k', alpha=.7)
if m:
ax.plot(x, m*x+q, lw=2, color=colors[5])
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Dataset', fontsize=12)
plt.show()
def plot_all(cost_history, m, q, low, high, step):
idx = range(low,high,step)
ch = cost_history[idx]
th1 = m[idx]
th0 = q[idx]
fig = plt.figure(figsize=(18,6))
ax = fig.add_subplot(1,2,1)
minx, maxx, miny, maxy = 0, len(ch), ch.min(), ch.max()
deltay, deltax = .1*(maxy-miny), .1*(maxx-minx)
miny, maxy, minx, maxx = miny - deltay, maxy + deltay, minx - deltax, maxx + deltax
ax.plot(range(len(ch)), ch, alpha=1, color=colors[0], linewidth=2)
    plt.xlabel('iterations')
    plt.ylabel('cost')
plt.xlim(minx,maxx)
plt.ylim(miny,maxy)
ax.xaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, pos: '{:0.0f}'.format(x*step+low)))
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
ax = fig.add_subplot(1,2,2)
minx, maxx, miny, maxy = th0.min(), th0.max(), th1.min(), th1.max()
deltay, deltax = .1*(maxy-miny), .1*(maxx-minx)
    miny, maxy, minx, maxx = miny - deltay, maxy + deltay, minx - deltax, maxx + deltax
ax.plot(th0, th1, alpha=1, color=colors[1], linewidth=2, zorder=1)
ax.scatter(th0[-1],th1[-1], color=colors[5], marker='o', s=40, zorder=2)
plt.xlabel(r'$m$')
plt.ylabel(r'$q$')
plt.xlim(minx,maxx)
plt.ylim(miny,maxy)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.tight_layout()
plt.show()
```
### Risk and minimization
Given any algorithm that, for each input value $x$, produces a prediction $f(x)$, the quality of its predictions can be measured by a *loss function* $L(x_1, x_2)$, where $x_1$ is the value predicted by the model and $x_2$ is the correct value associated with $x$. In essence, the value of the loss $L(f(x),y)$ measures how much it "costs" (according to the cost model induced by the function itself) to predict, given $x$, the value $f(x)$ instead of the correct value $y$.
Since the cost clearly depends on the pair of values $x,y$, an overall assessment of the quality of the algorithm's predictions can be obtained by considering the expected value of the loss as $x$ and $y$ vary, under the assumption of a joint probability (density) distribution $p(x,y)$ over those values. The distribution $p(x,y)$ gives the probability that the next point on which a prediction is required is $x$ and that the correct value to predict is $y$. Note that we do not assume that two different occurrences of $x$ are associated with the same value of $y$: we therefore do not assume a functional relation, however unknown, between $x$ and $y$, but only a probabilistic relation $p(y\mid x)$.
This makes it possible to account for noise in the observations.
From the above, denoting by $D_x$ and $D_y$ the domains of $x$ and $y$, and assuming a distribution $p(x,y)$ that provides a statistical model of the context in which the predictions are to be made, the quality of a prediction algorithm computing the function $f(x)$ is given by the *risk*
$$
\mathcal{R}(f)=\mathbb{E}_p[L(f(x),y)]=\int_{D_x}\int_{D_y} L(f(x),y)p(x,y)dxdy
$$
The risk therefore tells us how much we expect it to cost to predict $f(x)$, assuming that:
1. $x$ is drawn at random from the marginal distribution
$$
p(x)=\int_{D_y} p(x,y)dy
$$
2. the corresponding correct value to predict is drawn at random from the conditional distribution
$$
p(y\mid x)=\frac{p(x,y)}{p(x)}
$$
3. the cost is represented by the function $L(x_1,x_2)$
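As a concrete (and purely illustrative) addition, the sketch below computes the risk for a discrete toy problem as the weighted sum $\sum_{x,y} L(f(x),y)\,p(x,y)$; the loss table, joint distribution, and predictor used here are hypothetical placeholders, not the ones in the example that follows.

```python
# Hypothetical 0/1 loss L(predicted, true): 0 on the diagonal, 1 off the diagonal
L = {('T', 'T'): 0, ('T', 'F'): 1, ('F', 'T'): 1, ('F', 'F'): 0}

# Hypothetical joint distribution p(x, y) over sky observations and rain outcomes
p = {('S', 'T'): 0.10, ('S', 'F'): 0.20,
     ('N', 'T'): 0.30, ('N', 'F'): 0.20,
     ('C', 'T'): 0.15, ('C', 'F'): 0.05}

# Hypothetical predictor f(x)
f = {'S': 'F', 'N': 'T', 'C': 'T'}

# Risk = expected loss under p(x, y): sum over all (x, y) pairs of L(f(x), y) * p(x, y)
risk = sum(L[(f[x], y)] * p_xy for (x, y), p_xy in p.items())
print('Risk of f:', risk)
```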
##### Example
Consider the case in which we want to predict whether it will rain during the day, given the state of the sky in the morning, assuming that the possible observations are "clear" (S), "cloudy" (N), "overcast" (C), and that the predictions are "rain" (T) and "no rain" (F). The loss function will then be of the form $L:\{T,F\}^2\mapsto\mathbb{R}$.
The definition of a particular loss function reflects the user's priorities. In this specific case, if carrying the umbrella without needing it (after a prediction T) is considered just as "unpleasant" as getting wet because the umbrella was left at home (after a prediction F), then the loss function is $L_1(x_1,x_2)$, defined by the following table
| $x_1$/$x_2$ | T | F |
| :---------: | :--: | :--: |
| T | 0 | 1 |
| F | 1 | 0 |
If instead we consider getting wet for not having taken the umbrella much worse than carrying the umbrella needlessly, then the loss function $L_2(x_1,x_2)$ can be defined as
| $x_1$/$x_2$ | T | F |
| :---------: | :--: | :--: |
| T | 0 | 1 |
| F | 25 | 0 |
If we assume that the joint distribution over $\{S,N,C\}\times\{T,F\}$ is
| $x$/$y$ | T | F |
| :-----: | :--: | :--: |
| S | .05 | .2 |
| N | .25 | .25 |
| C | .2 | .05 |
and we consider two possible prediction functions $f_1(x)$ and $f_2(x)$
| $x$ | $f_1(x)$ | $f_2(x)$ |
| :--: | :--------------------: | :------: |
| S | F | F |
| N | F | T |
| C | T | T |
we can verify that, when the loss function is $L_1$, the risk in the two cases is $\mathcal{R}(f_1)=0.65$ and $\mathcal{R}(f_2)=0.4$, so $f_2$ is preferable to $f_1$. Conversely, if the loss function is $L_2$, then $\mathcal{R}(f_1)=1.55$ and $\mathcal{R}(f_2)=7.55$, so $f_1$ is preferable to $f_2$.
As we can see, the choice between $f_1(x)$ and $f_2(x)$ depends both on the loss function adopted and on the distribution $p(x,y)$, which instead is given and, moreover, unknown. A different distribution could therefore lead to different conclusions even with the same loss function: if, for example, we refer to the loss function $L_1$, then the joint distribution
| $x$/$y$ | T | F |
| :-----: | :--: | :--: |
| S | .05 | .05 |
| N | .05 | .4 |
| C | .05 | .4 |
yields risk values $\mathcal{R}(f_1)=0.6$ and $\mathcal{R}(f_2)=0.9$, now making $f_1$ preferable to $f_2$.
#### Empirical risk
Since the true distribution $p(x,y)$ is by assumption unknown (if it were not, we could always make predictions using the true conditional distribution $p(y\mid x)$), computing the true risk is impossible and approximations based on the available data are needed. In particular, we can apply the standard approach of using the sample mean as an estimator of the expected value, and consider the *empirical risk*, obtained by averaging over the sample provided by the data available in the training set $X=\{(x_1,y_1),\ldots,(x_n,y_n)\}$
$$
\overline{\mathcal{R}}(f; X)=\overline{L}(f(x), y; X)=\frac{1}{n}\sum_{i=1}^nL(f(x_i),y_i)
$$
The function used for prediction will then be the one that, within the set of functions considered, minimizes the empirical risk
$$
f^*=\underset{f\in F}{\mathrm{argmin}}\;\overline{\mathcal{R}}(f;X)
$$
Note that the empirical risk actually depends both on the data in $X$ and on the function $f$: it is a function of $X$ and a functional of $f$. Finding $f^*$ therefore requires a functional minimization of the empirical risk. In general, the problem is simplified by restricting the search to classes of functions defined by coefficients: in this way the empirical risk can be expressed as a function of the coefficients (besides $X$) and the minimization becomes an ordinary function minimization.
Clearly, the hope is that minimizing the empirical risk gives results similar to those we would obtain by minimizing the true risk. In general, this depends on four factors:
- The size of the training set $X$. As the amount of data grows, $\overline{\mathcal{R}}(f; X)$ tends to $\mathcal{R}(f)$ for every function $f$
- The true distribution $p(x,y)$. The more complex it is, the more data is needed to approximate it well.
- The loss function $L$, which can create problems if it assigns very high costs to particular, low-probability situations
- The set $F$ of functions considered. If it is large and the functions have a complex structure, more data is needed to obtain a good approximation.
At the same time, considering a small set of simple functions does make the minimum of the empirical risk over $F$ a good approximation of the minimum true risk over $F$ itself, but it also means that this minimum may be much worse than the one obtainable by considering wider classes of functions.
### Minimizing the risk function
In general, the set $F$ of functions is defined parametrically as $F=\{f(x;\theta)\}$, where $\theta\in D_\theta$ is a (typically multidimensional) coefficient that determines, within the class $F$ (usually defined in "structural" terms), the particular function used. A typical example is *linear regression*, in which we want to predict the value of an attribute $y$ with domain $R$ from the values of $m$ other attributes $x_1,\ldots, x_m$ (which for simplicity we also assume to be in $R$): in linear regression, the set of possible functions $f:R^m\mapsto R$ is restricted to the linear functions $f_\mathbf{w}(x)=w_0+w_1x_1+\ldots+w_mx_m$, and the parameter $\theta$ corresponds to the vector $\mathbf{w}=(w_0,\ldots,w_m)$ of coefficients.
In this case, once the family $F$ of functions is fixed, the empirical risk can be seen as a function of $\theta$
$$
\overline{\mathcal{R}}(\theta; X)=\overline{L}(f(x;\theta), y; X)=\frac{1}{n}\sum_{i=1}^nL(f(x_i;\theta),y_i)\hspace{2cm}f\in F
$$
and the minimization of the empirical risk can be carried out with respect to $\theta$
$$
\theta^*=\underset{\theta\in D_\theta}{\mathrm{argmin}}\;\overline{\mathcal{R}}(\theta;X)
$$
from which the optimal function (within the family $F$) $f^*=f(x;\theta^*)$ is obtained.
The minimization of the risk function takes place in the domain of definition $D_\theta$ of $\theta$, and can be carried out in different ways, depending on the situation and on considerations of computational efficiency and quality of the resulting solutions.
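As a small illustration (an addition, not part of the original notebook), the sketch below evaluates the empirical risk of a linear model for a given coefficient vector; the synthetic data and the choice of squared loss are assumptions made only for this example (the classification example later in the notebook uses cross-entropy instead).

```python
import numpy as np

rng = np.random.default_rng(0)

# Synthetic regression data (assumed only for this illustration): n points, m features,
# targets generated by a noisy linear relation y = 2 + x . w_true + noise
n_pts, m_feat = 100, 2
X_lin = rng.normal(size=(n_pts, m_feat))
w_true = np.array([1.5, -0.7])
y_lin = 2.0 + X_lin @ w_true + rng.normal(scale=0.1, size=n_pts)

def empirical_risk(w, X, y):
    """Empirical risk of the linear model f_w(x) = w[0] + w[1:] . x under squared loss."""
    preds = w[0] + X @ w[1:]
    return np.mean((preds - y) ** 2)

# The risk is now an ordinary function of the coefficient vector w = (w_0, ..., w_m)
print(empirical_risk(np.array([2.0, 1.5, -0.7]), X_lin, y_lin))  # close to the generating coefficients
print(empirical_risk(np.zeros(m_feat + 1), X_lin, y_lin))        # a clearly worse candidate
```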
#### Analytical search for the optimum
If the problem is one of *unconstrained* minimization, hence over $R^m$, a first approach is the standard one from calculus: look for values $\overline\theta$ of $\theta$ at which all the partial derivatives $\frac{\partial \overline{\mathcal{R}}(\theta; X)}{\partial \theta_i}$ vanish, i.e. such that, denoting by $m$ the dimension (number of components) of $\theta$, the system of $m$ equations in $m$ unknowns
$$
\frac{\partial \overline{\mathcal{R}}(\theta; X)}{\partial \theta_i}\Bigr|_{\theta=\overline\theta}=0\hspace{2cm} i=1,\ldots,m
$$
is satisfied. Solving this system analytically is typically hard or impossible, so numerical techniques are often adopted instead.
##### Gradient descent
Gradient descent is one of the most popular optimization techniques, particularly in Machine Learning and Neural Networks. The technique consists in minimizing an objective function $J(\theta)$, defined over the model parameters $\theta\in\mathbb{R}^d$, through successive updates of the value of $\theta$ (starting from an initial value $\theta^{(0)}$) in the direction opposite to that of the current value of the gradient $J'(\theta)=\nabla J(\theta)$. Recall that, given a function $f(x_1,x_2,\ldots,x_d)$, the gradient $\nabla f$ of $f$ is the $d$-dimensional vector of the derivatives of $f$ with respect to the variables $x_1,\ldots, x_d$: that is, the vector such that $[\nabla f]_i=\frac{\partial f}{\partial x_i}$. A parameter $\eta$, called the *learning rate*, determines the scale of the updates, and hence the size of the steps taken towards a local minimum.
We can interpret the technique as moving on the surface of the function $J(\theta)$, always following the direction of steepest descent, until we reach a point from which no further descent is possible.
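To make the update rule concrete (this snippet is an addition, not part of the original notebook), here is a minimal gradient descent loop on a simple two-dimensional quadratic $J(\theta)=\theta_1^2+10\,\theta_2^2$, whose gradient is known in closed form; the objective, learning rate, and number of iterations are all illustrative choices.

```python
import numpy as np

def J(theta):
    # A simple convex objective with its unique minimum at the origin (illustrative choice)
    return theta[0]**2 + 10 * theta[1]**2

def J_grad(theta):
    # Gradient of J, computed analytically
    return np.array([2 * theta[0], 20 * theta[1]])

theta_k = np.array([3.0, -2.0])   # initial value theta^(0)
eta_gd = 0.05                     # learning rate
for k in range(100):
    theta_k = theta_k - eta_gd * J_grad(theta_k)   # theta^(k+1) = theta^(k) - eta * grad J(theta^(k))

print('theta after 100 steps:', theta_k, '  J(theta):', J(theta_k))
```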
##### Gradient descent variants
In many cases, and always in ML, the objective function corresponds to applying a *loss function*, predefined and dependent on the adopted model, to a given set of elements of a dataset $X=(x_1,\ldots, x_n)$ (which, in the supervised learning case, is a set of pairs $X=((x_1,t_1),\ldots,(x_n,t_n))$): we denote this by $J(\theta; X)$. This corresponds to an approximation of the *risk*
$$
\mathcal{R}(\theta)=\int J(\theta,x)p(x)dx=\mathbb{E}_{p}[J(\theta;x)]
$$
In general, the loss function is defined additively over the elements of $X$ (the cost for the set $X$ equals the sum of the costs for its elements), so the value is $J(\theta;X)=\sum_{i=1}^nJ(\theta;x_i)$, or preferably, to avoid an excessive dependence on the number of elements, the mean
$$J(\theta;X)=\frac{1}{n}\sum_{i=1}^nJ(\theta;x_i)$$
Note that, by the properties of differentiation, this assumption implies that the gradient is also additive, so that
$$J'(\theta; X)=\sum_{i=1}^nJ'(\theta;x_i)$$ or $$J'(\theta;X)=\frac{1}{n}\sum_{i=1}^nJ'(\theta;x_i)$$
We can then identify three variants of the method, which differ in the number of elements of $X$ used, at each step, to evaluate the gradient of the objective function. Using more data increases the accuracy of the update, but also the time needed to perform the update itself (in particular, to evaluate the gradient at the current value of $\theta$).
###### Batch gradient descent
In this case the gradient is evaluated, each time, by considering all the elements in the training set $X$. At the $k$-th step the update performed is therefore
$$
\theta^{(k+1)}=\theta^{(k)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)
$$
or, for the individual coefficients,
$$
\theta_j^{(k+1)}=\theta_j^{(k)}-\eta\sum_{i=1}^n\frac{\partial J(\theta;x_i)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}
$$
Since each iteration requires evaluating the gradient (with the current value $\theta^{(k)}$ of all the coefficients) on all the elements of $X$, this solution tends to be very slow, especially with very large datasets, as in the case of complex neural networks and deep learning. Moreover, the approach becomes completely impractical if the dataset is so large that it does not even fit in memory.
In code, batch gradient descent looks like this:
```python
for i in range(n_epochs):
g = 0
for k in range(dataset_size):
g = g+evaluate_gradient(loss_function, theta, X[k])
theta = theta-eta*g
```
The loop is executed a number of times equal to the number of epochs, where an *epoch* is an iteration over all the elements of $X$. As a consequence, the estimate of $\theta$ is updated a number of times equal to the number of epochs. Batch gradient descent is guaranteed to converge to the global minimum if the function $J(\theta)$ is convex, and to a local minimum otherwise.
##### Example
Let us apply these ideas to a simple classification problem on a two-dimensional dataset, shown below.
```
data = pd.read_csv(get_file("testSet.txt", local=0), delim_whitespace=True, header=None, names=['x1','x2','t'])
plot_ds(data)
n = len(data)
nfeatures = len(data.columns)-1
X = np.array(data[['x1','x2']])
t = np.array(data['t']).reshape(-1,1)
X = np.column_stack((np.ones(n), X))
```
The classification method considered is *logistic regression*, which determines a separating hyperplane (a line, in this case) by minimizing, with respect to the vector $\theta$ of the coefficients of the hyperplane equation (3 in this case), the empirical risk on the dataset associated with the *cross-entropy* loss, for which the cost of a single element $x=(x_1,\ldots,x_d)$ is
$$ J(\theta, x)=-\left(t\log y + (1-t)\log (1-y)\right) $$
where the *target* $t$ is the $0/1$ value of the element's class and $y\in (0,1)$ is the value predicted by the model, defined as
$$
y = \sigma(x) = \frac{1}{1+e^{-(\theta_0+\sum_{i=1}^d\theta_ix_i)}}
$$
```
def sigma(theta, X):
return sp.expit(np.dot(X, theta))
```
The empirical risk associated with the whole dataset can then be defined as the corresponding mean
$$
J(\theta, X)=-\frac{1}{n}\sum_{i=1}^n \left(t_i\log \sigma(x_i) +(1-t_i)\log (1-\sigma(x_i))\right)
$$
```
def approx_zero(v):
eps = 1e-50
v[v<eps]=eps
return v
def cost(theta, X, t):
eps = 1e-50
v = sigma(theta,X)
v[v<eps]=eps
term1 = np.dot(np.log(v).T,t)
v = 1.0 - sigma(theta,X)
v[v<eps]=eps
term2 = np.dot(np.log(v).T,1-t)
return ((-term1 - term2) / len(X))[0]
```
The gradient of the loss is then
\begin{align*}
\frac{\partial J(\theta,x)}{\partial\theta_i}&=-(t-\sigma(x))x_i\hspace{1cm}i=1,\ldots,d\\
\frac{\partial J(\theta,x)}{\partial\theta_0}&=-(t-\sigma(x))
\end{align*}
and the corresponding gradient of the empirical risk is given by
\begin{align*}
\frac{\partial J(\theta,X)}{\partial\theta_i}&=-\frac{1}{n}\sum_{j=1}^n (t_j-\sigma(x_j))x_{ji}\hspace{1cm}i=1,\ldots,d\\
\frac{\partial J(\theta,X)}{\partial\theta_0}&=-\frac{1}{n}\sum_{j=1}^n(t_j-\sigma(x_j))
\end{align*}
```
def gradient(theta, X, t):
return -np.dot(X.T, (t-sigma(theta, X))) / len(X)
```
From the above, one BGD iteration corresponds to the updates
\begin{align*}
\theta_j^{(k+1)}&=\theta_j^{(k)}-\eta\frac{\partial J(\theta,X)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}=\theta_j^{(k)}+\frac{\eta}{n}\sum_{i=1}^n (t_i-\sigma(x_i))x_{ij}\hspace{1cm}j=1,\ldots,d\\
\theta_0^{(k+1)}&=\theta_0^{(k)}-\eta\frac{\partial J(\theta,X)}{\partial\theta_0}\Bigr\vert_{\small\theta=\theta^{(k)}}=\theta_0^{(k)}+\frac{\eta}{n}\sum_{i=1}^n(t_i-\sigma(x_i))
\end{align*}
```
def batch_gd(X, t, eta = 0.1, epochs = 10000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for k in range(epochs):
theta = theta - eta * gradient(theta,X,t)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, theta_history, m, q
```
Applying the method to the dataset, after fixing a value for the parameter $\eta$ and for the number of epochs (where one epoch corresponds to applying the iteration to all the elements of the dataset), we obtain the sequences of the cost values and of the slope and intercept of the separating line.
```
cost_history, theta_history, m, q = batch_gd(X, t, eta = 0.1, epochs = 100000)
```
The regular convergence of the method is evident in the following figure, which shows a typical behaviour of the cost function with respect to the number of iterations, together with the sequence of values taken by $\theta$, considered two-dimensional.
```
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
m_star = 0.62595499
q_star = 7.3662299
f = lambda i: np.sqrt((m_star-m[i])**2+(q_star-q[i])**2)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
```
Below, the resulting separating line:
```
plot_ds(data,m[-1],q[-1])
```
#### Stochastic gradient descent
In stochastic gradient descent, unlike the previous case, the gradient evaluation performed at each iteration refers to a single element $x_i$ of the training set. We therefore have
$$
\theta^{(k+1)}=\theta^{(k)}-\eta J'(\theta^{(k)};x_i)
$$
and, for the individual coefficients,
$$
\theta_j^{(k+1)}=\theta_j^{(k)}-\eta\frac{\partial J(\theta;x_i)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}
$$
Batch gradient descent evaluates the gradient for all the elements, including those similar to one another, at every iteration, thus performing a redundant set of operations. SGD addresses this by performing a single evaluation, and therefore operating faster.
At the same time, while the cost values in BGD decrease regularly towards the local minimum, with SGD the behaviour is much more irregular: the cost function fluctuates around an overall decreasing trend, with possibly significant local increases. On the one hand this is not necessarily bad, since the local oscillations may allow the search to escape the neighbourhood of a local minimum and continue towards new minima. On the other hand, the local oscillation makes the final convergence to the minimum harder.
This oscillation is also visible in the behaviour of the coefficient values. Note, however, that by considering the sequence of cost values at the end of each *epoch* (the sequence of iterations that covers all the elements of the dataset), the underlying decreasing trend emerges.
In code, the stochastic gradient descent method looks like this:
```python
for i in range(n_epochs):
np.random.shuffle(data)
for k in range(dataset_size):
g = evaluate_gradient(loss_function, theta, X[k])
theta = theta-eta*g
```
In the logistic regression case, the update at each iteration is therefore
\begin{align*}
\theta_j^{(k+1)}&=\theta_j^{(k)}+\eta(t_i-\sigma(x_i))x_{ij}\hspace{1cm}j=1,\ldots,d\\
\theta_0^{(k+1)}&=\theta_0^{(k)}+\eta(t_i-\sigma(x_i))
\end{align*}
```
def stochastic_gd(X, t, eta = 0.01, epochs = 1000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for j in range(epochs):
for i in range(n):
e = (t[i] - sigma(theta, X[i,:]))[0]
theta = theta + eta * e * X[i,:].reshape(-1,1)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, theta_history, m, q
```
To apply the method it is again necessary to specify the value of $\eta$ and the number of epochs. Given the structure of the algorithm, the number of iterations will be equal to the number of epochs multiplied by the size $n$ of the dataset.
```
cost_history, theta_history, m, q = stochastic_gd(X, t, eta = 0.01, epochs = 10000)
low, high, step = 0*n, 150*n, 30
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
```
As can be seen from the following figure, if the cost and coefficient values are considered only at the end of each epoch, their behaviour turns out to be smooth.
```
low, high, step = 0*n, 1000*n, n
plot_all(cost_history, m, q, low, high, step)
```
#### Mini-batch gradient descent
This approach lies between the two previous ones, generalizing the SGD idea of considering a single element per iteration to considering different subsets of the dataset. At the beginning of each epoch the algorithm partitions the dataset into $\lceil n/s\rceil$ subsets (*mini-batches*) of fixed size $s$, and then performs $\lceil n/s\rceil$ iterations, in each of which $\theta$ is updated by evaluating the gradient on the $s$ elements of the current mini-batch.
Mini-batch gradient descent is the algorithm typically used for training neural networks, in particular *deep* networks.
If we denote by $X_i\subset X$ the mini-batch currently considered, the update at each iteration is the following
$$
\theta^{(k+1)}=\theta^{(k)}-\eta\sum_{x\in X_i}J'(\theta^{(k)};x)
$$
or, equivalently,
$$
\theta_j^{(k+1)}=\theta_j^{(k)}-\eta\sum_{x\in X_i}\frac{\partial J(\theta;x)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}
$$
In this way the variance of the coefficient updates is reduced. Moreover, in practice it is possible to exploit the very efficient implementations of the gradient computation over a mini-batch available in recent *deep learning* libraries. The mini-batch size typically ranges between $50$ and $256$.
```python
for i in range(n_epochs):
np.random.shuffle(data)
for batch in get_batches(dataset, batch_size):
g = 0
for x in batch:
            g = g+evaluate_gradient(loss_function, theta, x)
theta = theta-eta*g
```
The result is an oscillating behaviour of both the cost function and the estimated coefficient values. Clearly, the oscillation is more marked the smaller the mini-batch size, i.e. the closer the method gets to SGD.

The updates in the case of logistic regression follow immediately from the above
\begin{align*}
\theta_j^{(k+1)}&=\theta_j^{(k)}+\eta\sum_{x_i\in MB}( t_i-y_i)x_{ij}\hspace{1cm}j=1,\ldots,d\\
\theta_0^{(k+1)}&=\theta_0^{(k)}+\eta\sum_{x_i\in MB}(t_i-y_i)
\end{align*}
```
def mb_gd(X, t, eta = 0.01, epochs = 1000, minibatch_size = 5):
mb = int(np.ceil(float(n)/minibatch_size))
idx = np.arange(0,n)
np.random.shuffle(idx)
theta = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
cost_history_iter = []
for j in range(epochs):
for k in range(mb-1):
g = 0
for i in idx[k*minibatch_size:(k+1)*minibatch_size]:
e = (t[i] - sigma(theta, X[i,:]))[0]
g = g + e * X[i,:]
theta = theta + eta * g.reshape(-1,1)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
g = 0
        for i in idx[(mb-1)*minibatch_size:n]:  # last (possibly smaller) mini-batch of the epoch
e = (t[i] - sigma(theta, X[i,:]))[0]
g = g + e * X[i,:]
theta = theta + eta * g.reshape(-1,1)
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, m, q
cost_history, m, q = mb_gd(X, t, eta = 0.01, epochs = 10000, minibatch_size = 5)
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
```
#### Shortcomings
The elementary gradient descent methods illustrated above do not, in general, guarantee a high convergence rate. Moreover, their use raises a number of issues:
- the choice of the learning rate $\eta$ can be difficult. Too small a value can make convergence excessively slow, while too large a value can lead to oscillations around the minimum, or even to divergence
- to mitigate this problem, $\eta$ can be adjusted over time, for example by decreasing it according to a predefined schedule or whenever the decrease of the cost function between two successive epochs falls below a given threshold (both options are sketched right after this list). However, both the schedules and the thresholds must be fixed in advance and therefore cannot adapt to the characteristics of the dataset
- the same learning rate is applied to the update of all coefficients
- in many cases the cost function, especially when dealing with neural networks, is strongly non-convex, and is therefore characterized by numerous local minima and saddle points. The methods considered may have difficulty escaping such configurations, in particular saddle points, which are often surrounded by regions with a very small gradient.
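As a minimal illustration of the two adjustment strategies mentioned in the list above (not part of the original notebook), the helpers below sketch a predefined step decay and a plateau-based reduction; the names `step_decay` and `reduce_on_plateau` and all default values are ours.
```python
# Hypothetical helpers sketching the two eta-adjustment strategies above.

def step_decay(eta0, epoch, drop=0.5, every=100):
    # Predefined schedule: halve the learning rate every `every` epochs.
    return eta0 * drop ** (epoch // every)

def reduce_on_plateau(eta, prev_cost, curr_cost, threshold=1e-4, factor=0.5):
    # Threshold rule: shrink eta when the cost improvement between two
    # successive epochs falls below a given threshold.
    if prev_cost - curr_cost < threshold:
        return eta * factor
    return eta
```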
#### Momentum
The previous methods are inefficient in situations where the cost function varies very differently depending on the direction considered (for example, valleys that descend slowly but have steep side walls). In such cases the previous algorithms proceed very slowly towards the minimum while oscillating substantially in the transverse direction: this situation is illustrated on the left of the figure below.
The *momentum method* relies on a physical interpretation of the optimization process, in which gradient descent is seen as the motion of a body of mass $m=1$ moving on the surface of the cost function $J(\theta)$, subject to a weight force $F(\theta)=-\nabla U(\theta)$, where $U(\theta)=\eta h(\theta)=\eta J(\theta)$ is the potential energy of the body at position $\theta$ (the physical constant $g$ in the weight force $F=-mgh$ is thus assumed equal to $\eta$). In this model, the negative gradient $-\eta J'(\theta)$ is therefore the force (and acceleration, since $a=\frac{F}{m}$) acting on the body at point $\theta$.
In gradient descent, the displacement of the body at a given point $\theta$ is assumed to be determined by the acceleration computed at that same point, and hence by the gradient $J'(\theta)$, since the update rule is $\theta^{(k+1)}=\theta^{(k)}-\eta J'(\theta^{(k)})$.
In the momentum method, the reference is a model more consistent with the physics of a body moving on a surface under gravity, which requires the concept of velocity $v(\theta)$. In this model the displacement of the body from a point $\theta$ is determined by the velocity computed at that point, $\theta^{(k+1)}=\theta^{(k)}+v^{(k+1)}$, where the change in velocity is given by the acceleration: $v^{(k+1)}=v^{(k)}-\eta J'(\theta^{(k)})$.
As can be seen, we have
\begin{align*}
v^{(k+1)}&=-\eta J'(\theta^{(k)})+v^{(k)}=-\eta J'(\theta^{(k)})-\eta J'(\theta^{(k-1)})+v^{(k-1)}=\cdots=-\eta\sum_{i=0}^kJ'(\theta^{(i)})+v^{(0)}\\
\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}=\theta^{(k)}-\eta\sum_{i=0}^kJ'(\theta^{(i)})+v^{(0)}
\end{align*}
which amounts to associating the displacement with the sum (an integral, in the physical case) of the past accelerations.

Relying on this model leads the algorithm, at every step, to preserve at least in part the direction of the previous step (since $v^{(k+1)}=-\eta J'(\theta^{(k)})+v^{(k)}$), rewarding directions that persist over a sequence of steps. The result is the behaviour shown on the right of the previous figure, where the inertia along the direction of the minimum limits the oscillations.
Note that this does not happen in plain gradient descent, where $v^{(k+1)}=-\eta J'(\theta^{(k)})$.
Mathematically, the inertia effect is obtained by subtracting from the (vector) velocity computed at the previous step the gradient evaluated at the corresponding position. The gradient is subtracted because, keeping the correspondence with mechanics, a positive gradient tends to reduce the velocity.
The momentum method typically uses a second parameter $\gamma$, which determines the fraction of $v^{(k)}$ that survives in the definition of $v^{(k+1)}$ and which physically plays the role of a friction coefficient. This yields the formulation:
\begin{align*}
v^{(k+1)}&=\gamma v^{(k)} -\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)\\
\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}
\end{align*}
At every step, the momentum method first determines the current displacement vector from the one at the previous step and from the gradient at $\theta$: the relative contribution of the two terms is weighted by the pair of parameters $\gamma$ and $\eta$. The computed displacement is then applied to the current value of $\theta$ (the minus sign comes, as always, from the fact that we are assuming to look for a local minimum).
If the gradient points in the same direction as the current velocity, that velocity is increased, so the update of $\theta$ becomes larger, and it keeps growing as long as the direction of motion stays consistent with the gradient at the points traversed.
```python
v = 0
for i in range(n_epochs):
g = 0
for k in range(dataset_size):
g = g+evaluate_gradient(loss_function, theta, X[k])
v = gamma*v-eta*g
theta = theta+v
```
As can be seen, while $\theta^{(k)}=(\theta_1^{(k)},\ldots,\theta_d^{(k)})^T$ is the estimate of the optimal solution at step $k$, $v^{(k)}=(v_1^{(k)},\ldots,v_d^{(k)})^T$ is the update applied to that value to obtain $\theta^{(k+1)}$: we can therefore view $v$ as the velocity vector with which $\theta$ moves in the solution space.
As already illustrated above, we can express the update as follows, highlighting how it depends on the gradient computed at all previously visited positions, with an effect that decays exponentially with $\gamma$ as we move further back in time. Assuming $v^{(0)}=0$:
\begin{align*}
\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}= \theta^{(k)}+\gamma v^{(k)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)=\theta^{(k)}+\gamma^2 v^{(k-1)}-\gamma\eta\sum_{i=1}^nJ'(\theta^{(k-1)};x_i) -\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)\\
&=\theta^{(k)}+\gamma^2 v^{(k-1)}-\eta\left(\sum_{i=1}^nJ'(\theta^{(k)};x_i)+\gamma\sum_{i=1}^nJ'(\theta^{(k-1)};x_i)\right)=\cdots=
\theta^{(k)}-\eta\left(\sum_{j=0}^k\gamma^j\sum_{i=1}^nJ'(\theta^{(k-j)};x_i)\right)
\end{align*}
The updates in the case of logistic regression follow immediately
\begin{align*}
v_j^{(k+1)}&=\gamma v_j^{(k)}+\frac{\eta}{n}\sum_{i=1}^n( t_i-\sigma(x_i))x_{ij}\hspace{1cm}j=1,\ldots,d\\
v_0^{(k+1)}&=\gamma v_0^{(k)}+\frac{\eta}{n}\sum_{i=1}^n(t_i-\sigma(x_i)) \\
\theta_j^{(k+1)}&=\theta_j^{(k)}+v_j^{(k+1)}\hspace{1cm}j=0,\ldots,d
\end{align*}
```
def momentum_gd(X,t, eta = 0.1, gamma = 0.97, epochs = 1000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
v = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for k in range(epochs):
v = gamma*v - eta * gradient(theta,X,t)
theta = theta + v
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, m, q
cost_history, m, q = momentum_gd(X, t, eta = 0.1, gamma = 0.97, epochs = 10000)
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
```
#### Nesterov accelerated gradient
In the momentum method, knowing $\theta^{(k)}$ and $v^{(k)}$ at step $k$ makes it possible, without computing the gradient, to obtain an approximate estimate $\tilde{\theta}^{(k+1)}=\theta^{(k)}+\gamma v^{(k)}$ of
$$
\theta^{(k+1)}=\theta^{(k)}+v^{(k+1)}=\theta^{(k)}+\gamma v^{(k)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)=\tilde{\theta}^{(k+1)}-\eta\sum_{i=1}^nJ'(\theta^{(k)};x_i)
$$
Nesterov's method follows the same approach as the momentum method, with the difference that at each step the gradient is evaluated, with an approximate *look-ahead*, not at the currently visited point $\theta^{(k)}$ of the solution space but, roughly, at the next point $\theta^{(k+1)}$ (approximated by $\tilde{\theta}^{(k+1)}$). In this way, the changes of $v$ (and hence of $\theta$) are anticipated with respect to the momentum method.
\begin{align*}
v^{(k+1)}&=\gamma v^{(k)} -\eta\sum_{i=1}^nJ'(\tilde{\theta}^{(k+1)};x_i)=\gamma v^{(k)} -\eta\sum_{i=1}^nJ'(\theta^{(k)}+\gamma v^{(k)};x_i)\\
\theta^{(k+1)}&=\theta^{(k)}+v^{(k+1)}
\end{align*}

```python
v = 0
for i in range(n_epochs):
g = 0
theta_approx = theta+gamma*v
for k in range(dataset_size):
g = g+evaluate_gradient(loss_function, theta_approx, X[k])
v = gamma*v-eta*g
theta = theta+v
```
```
def nesterov_gd(X,t, eta = 0.1, gamma = 0.97, epochs = 1000):
theta = np.zeros(nfeatures+1).reshape(-1,1)
v = np.zeros(nfeatures+1).reshape(-1,1)
theta_history = []
cost_history = []
for k in range(epochs):
v = gamma*v - eta * gradient(theta+gamma*v,X,t)
theta = theta + v
theta_history.append(theta)
cost_history.append(cost(theta, X, t))
theta_history = np.array(theta_history).reshape(-1,3)
cost_history = np.array(cost_history).reshape(-1,1)
m = -theta_history[:,1]/theta_history[:,2]
q = -theta_history[:,0]/theta_history[:,2]
return cost_history, m, q
cost_history, m, q = nesterov_gd(X, t, eta = 0.1, gamma = 0.97, epochs = 10000)
low, high, step = 0, 5000, 10
plot_all(cost_history, m, q, low, high, step)
dist = np.array([f(i) for i in range(len(m))])
np.argmin(dist>1e-2)+1
plot_ds(data,m[-1],q[-1])
```
### Adagrad
Adagrad introduces the possibility of applying different learning rates to the different parameters being optimized (the dimensions of the search space): in particular, dimensions along which large gradient values were observed in previous steps will tend to be assigned smaller learning rates, so that updates in those directions are limited. Conversely, dimensions (parameters) with small past gradient values, and hence little modified so far, will get a larger learning rate, making the corresponding components more sensitive to the gradient values.
As seen above, in "plain" gradient descent the update of the $j$-th coefficient is given by
$$
\theta_j^{(k+1)}= \theta_j^{(k)}-\eta \frac{\partial J(\theta, X)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}= \theta_j^{(k)}-\eta\sum_{i=1}^n\frac{\partial J(\theta;x_i)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(k)}}
$$
where $\eta$ is the same for all coefficients. In Adagrad, the update uses a learning rate
$\eta_{j}^{(k)}$, depending both on the coefficient and on the step of the method, defined as follows
$$
\eta_{j}^{(k)} = \frac{\eta}{\sqrt{G_{j,k}+\varepsilon}}
$$
where $\eta$ is a constant, $G_{j,k}=\sum_{i=0}^{k}g_{j,i}^2$ is the sum of the squares of the gradient components $g_{j,i}=\frac{\partial J(\theta, X)}{\partial\theta_j}\Bigr\vert_{\small\theta=\theta^{(i)}}$ with respect to $\theta_j$ over all previous iterations, while $\varepsilon$ is a small *smoothing* constant used to avoid zero denominators.
Using the formalism just introduced, the update of $\theta_j$ at the $(k+1)$-th iteration is given by
$$
\theta_j^{(k+1)}= \theta_j^{(k)}-\frac{\eta}{\sqrt{G_{j,k}+\varepsilon}}g_{j,k}
$$
As can be seen, the learning rate decreases monotonically over the iterations for all coefficients. At the same time, coefficients with large past gradient values (hence subject to significant changes) will see larger decreases of their learning rate, which therefore tends to $0$ more quickly and leaves those coefficients almost unchanged, while coefficients with small past gradients (which have changed little so far) keep a higher learning rate.
Since the denominator grows at every iteration in any case, the learning rate keeps decreasing until it reaches values so small that they prevent any real update of the solution.
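Unlike the methods above, the notebook does not include an Adagrad implementation; the following is a minimal sketch in the same style, reusing the notebook's `gradient` and `cost` helpers and the global `nfeatures`; the function name `adagrad_gd` and the default values are ours.
```python
def adagrad_gd(X, t, eta=0.1, eps=1e-8, epochs=1000):
    # Sketch only: per-coefficient learning rates eta/sqrt(G+eps),
    # where G accumulates the squared gradients over all past iterations.
    theta = np.zeros(nfeatures+1).reshape(-1,1)
    G = np.zeros_like(theta)
    theta_history, cost_history = [], []
    for k in range(epochs):
        g = gradient(theta, X, t)
        G = G + g**2
        theta = theta - eta/np.sqrt(G + eps) * g
        theta_history.append(theta)
        cost_history.append(cost(theta, X, t))
    theta_history = np.array(theta_history).reshape(-1,3)
    cost_history = np.array(cost_history).reshape(-1,1)
    m = -theta_history[:,1]/theta_history[:,2]
    q = -theta_history[:,0]/theta_history[:,2]
    return cost_history, m, q
```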
### Adadelta
To limit an excessive decrease of the learning rate, Adadelta does not accumulate all past gradients through the sum of their squares, which would give
$$
G_{j,k}=G_{j,k-1}+g_{j,k}^2
$$
Instead, a *decay* is applied by means of a coefficient $0<\gamma<1$:
$$
G_{j,k}=\gamma G_{j,k-1}+(1-\gamma)g_{j,k}^2
$$
### Second-order methods
The search for a maximum (or minimum) point can also be carried out as a search for the points where the first derivative (or, in general, the gradient) vanishes, by applying one of the standard iterative methods for finding the zeros of a function.
A typical method used in this setting is Newton-Raphson, in which (considering a univariate function) the following update is applied at each iteration
$$
x_{i+1}=x_{i}-\frac{f(x_{i})}{f'(x_{i})}
$$
At each iteration, the algorithm approximates $f$ by the line tangent to $f$ at $(x_i,f(x_{i}))$, and defines $x_{i+1}$ as the value at which that line crosses the $x$ axis.

When looking for maximum or minimum points, the iteration clearly becomes
$$
x_{i+1}=x_{i}-\frac{f'(x_{i})}{f''(x_{i})}
$$
For functions of several variables, the first derivative is replaced by the gradient
$\nabla f$, while the second derivative corresponds to the *Hessian* matrix $H$, defined as
$$
H_{ij}(f)=\frac{\partial^{2}f}{\partial x_{i}\partial x_{j}}
$$
The update at each iteration therefore becomes
$$
x^{(i+1)}=x^{(i)}-\big(H(f)^{-1}\nabla f\big)\big|_{x^{(i)}}
$$
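As a concrete illustration (not in the original notebook): for the logistic regression cost used throughout, the Hessian has the closed form $H=\frac{1}{n}X^T S X$ with $S=\mathrm{diag}\big(\sigma(x_i)(1-\sigma(x_i))\big)$, so a Newton step can be written with the notebook's own helpers (`sigma`, `gradient`, `cost`, and the globals `X`, `t`, `nfeatures`). The function name `newton_lr` and the default number of steps are ours.
```python
def newton_lr(X, t, epochs=20):
    # Sketch only: Newton-Raphson applied to the logistic regression cost.
    theta = np.zeros(nfeatures+1).reshape(-1,1)
    cost_history = []
    for k in range(epochs):
        s = sigma(theta, X)                 # (n,1) predicted probabilities
        S = (s * (1 - s)).flatten()         # diagonal of the weight matrix
        H = (X.T * S).dot(X) / len(X)       # Hessian of the mean cross-entropy
        theta = theta - np.linalg.solve(H, gradient(theta, X, t))
        cost_history.append(cost(theta, X, t))
    return np.array(cost_history).reshape(-1,1), theta
```
On a small, well-separated dataset like this one a handful of Newton steps typically suffices, but each step requires building and solving a $d\times d$ system, which is why first-order methods are preferred for large models.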
**[MSL-01]** Import the required modules and set the random seeds.
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
np.random.seed(20160612)
tf.set_random_seed(20160612)
```
**[MSL-02]** Prepare the MNIST dataset.
```
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
```
**[MSL-03]** Define the computation of the probability p using a single-layer neural network.
```
num_units = 1024
x = tf.placeholder(tf.float32, [None, 784])
w1 = tf.Variable(tf.truncated_normal([784, num_units]))
b1 = tf.Variable(tf.zeros([num_units]))
hidden1 = tf.nn.relu(tf.matmul(x, w1) + b1)
w0 = tf.Variable(tf.zeros([num_units, 10]))
b0 = tf.Variable(tf.zeros([10]))
p = tf.nn.softmax(tf.matmul(hidden1, w0) + b0)
```
**[MSL-04]** Define the loss function loss, the training algorithm train_step, and the accuracy metric accuracy.
```
t = tf.placeholder(tf.float32, [None, 10])
loss = -tf.reduce_sum(t * tf.log(p))
train_step = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(p, 1), tf.argmax(t, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
```
**[MSL-05]** Create a session and initialize the Variables.
```
sess = tf.Session()
sess.run(tf.initialize_all_variables())
```
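Note (not in the original notebook): on later TensorFlow 1.x releases `tf.initialize_all_variables()` is deprecated in favour of `tf.global_variables_initializer()`; the equivalent setup would be:
```python
# Equivalent initialization on later TensorFlow 1.x releases,
# where tf.initialize_all_variables() is deprecated:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
```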
**[MSL-06]** Repeat the parameter optimization 2000 times.
Each step applies gradient descent using a batch of 100 examples drawn from the training set.
In the end, an accuracy of about 97% is obtained on the test set.
```
i = 0
for _ in range(2000):
i += 1
batch_xs, batch_ts = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, t: batch_ts})
if i % 100 == 0:
loss_val, acc_val = sess.run([loss, accuracy],
feed_dict={x:mnist.test.images, t: mnist.test.labels})
print ('Step: %d, Loss: %f, Accuracy: %f'
% (i, loss_val, acc_val))
```
**[MSL-07]** Using the optimized parameters, display the predictions on the test set.
Here, for each of the digits 0 to 9, three correctly classified and three misclassified examples are shown.
```
images, labels = mnist.test.images, mnist.test.labels
p_val = sess.run(p, feed_dict={x:images, t: labels})
fig = plt.figure(figsize=(8,15))
for i in range(10):
c = 1
for (image, label, pred) in zip(images, labels, p_val):
prediction, actual = np.argmax(pred), np.argmax(label)
if prediction != i:
continue
if (c < 4 and i == actual) or (c >= 4 and i != actual):
subplot = fig.add_subplot(10,6,i*6+c)
subplot.set_xticks([])
subplot.set_yticks([])
subplot.set_title('%d / %d' % (prediction, actual))
subplot.imshow(image.reshape((28,28)), vmin=0, vmax=1,
cmap=plt.cm.gray_r, interpolation="nearest")
c += 1
if c > 6:
break
```
**420-A58-SF - Unsupervised Learning Algorithms - Summer 2021 - Technical Specialization in Artificial Intelligence**<br/>
MIT License - Copyright (c) 2021 Mikaël Swawola
<br/>

<br/>
**Objective: When exploring a dataset made up of text documents - such as Wikipedia pages, news articles, StackOverflow posts, etc. - it is common to look for similar documents. The goal of this exercise is to apply search techniques suited to this type of data (here, nearest neighbours). The documents used are the Wikipedia pages of public figures.**
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import pandas as pd
# The remaining modules will be imported as the exercises progress ...
```
The archive `people.zip` contains 4 files:
* **people_wiki.csv**: dataset made up of the Wikipedia pages of public figures
* **people_wiki_map_index_to_word.json**: mapping between words and indices
* **people_wiki_word_count.npz**: word-count (bag-of-words) vectors for each document
* **people_wiki_tf_idf.npz**: TF-IDF vectors for each document
In this lab, the words "article" and "document" are used interchangeably.
## 1 - Loading the dataset
**Exercise 1-1 - Using the Pandas library, read the data file `people/people_wiki.csv`. To allow the `join` operations performed later in the lab, name the index of the data frame `id`**
```
# Complete this cell ~ 2 lines of code
wiki = pd.read_csv('../../data/people/people_wiki.csv')
wiki.index.name = 'id'
```
**Exercise 1-2 - Display the first 5 rows of the data frame. What information do the columns contain?**
```
# Complete this cell ~ 1 line of code
wiki.head()
```
## 2 - Extracting word counts
The word-count vectors of the dataset were extracted beforehand into the file `people/people_wiki_word_count.npz`. These vectors are gathered in a sparse matrix, where the i-th row gives the word-count vector of the i-th document. Each column corresponds to a unique word appearing in the dataset. The mapping between words and indices is given in `people/people_wiki_map_index_to_word.json`
The following function loads the word-count vectors:
```
from scipy.sparse import csr_matrix
def load_sparse_csr(filename):
loader = np.load(filename)
data = loader['data']
indices = loader['indices']
indptr = loader['indptr']
shape = loader['shape']
return csr_matrix( (data, indices, indptr), shape)
```
The function above uses `csr_matrix` from the SciPy library:<br/>
[class scipy.sparse.csr_matrix(arg1, shape=None, dtype=None, copy=False)](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html)
**Exercise 2-1 - Using the function above, load the file containing the word-count vectors**
```
# Complete this cell ~ 1 line of code
from scipy.sparse import csr_matrix
word_count = load_sparse_csr('../../data/people/people_wiki_word_count.npz')
```
**Exercise 2-2 - Referring to the documentation of `csr_matrix`, convert the `word_count` matrix to a NumPy array. What do you observe?**
```
# Complete this cell ~ 1 line of code
word_count
59071*547979
word_count.toarray()
```
**Exercise 2-3 - Using the json module or the Pandas library, load the file containing the mapping between words and indices. How many words does the dictionary contain?**
```
# Complete this cell ~ 2-3 lines of code
import json
with open('../../data/people/people_wiki_map_index_to_word.json') as f:
map_index_to_word = json.load(f)
len(map_index_to_word)
```
**Exercise 2-4 (optional) - Extract the word-count vectors yourself. A good starting point is the `sklearn.CountVectorizer` function**
```
# Complete this cell
```
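A possible sketch for this optional exercise (not a provided solution): it assumes the raw article text lives in a `text` column of `wiki`, which should be checked against the output of `wiki.head()` above, and the resulting vocabulary will generally differ from the precomputed `map_index_to_word`.
```python
# Hypothetical sketch: rebuild word-count vectors from the raw text.
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer()
word_count_alt = vectorizer.fit_transform(wiki['text'])  # sparse matrix (n_docs, n_words)
vocab = vectorizer.vocabulary_                           # dict: word -> column index
word_count_alt.shape
```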
## 3 - Nearest neighbour search with the word-count representation
Let us start by finding the nearest neighbours of **Barack Obama**'s Wikipedia page. The word-count vectors will be used to represent the articles, and the **Euclidean distance** to measure similarity.
[class sklearn.neighbors.NearestNeighbors(*, n_neighbors=5, radius=1.0, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, n_jobs=None)](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html#sklearn.neighbors.NearestNeighbors)
**Exercise 3-1 - What is the id corresponding to Barack Obama's Wikipedia page?**
```
# Complete this cell ~ 1 line of code
wiki['name'] == 'Barack Obama'
#wiki[wiki['name'] == 'Barack Obama']
```
**Exercise 3-2 - Using scikit-learn, find the 10 Wikipedia pages most similar to Barack Obama's page. Display the distances and the names in a single data frame**
```
# Complete this cell ~ 5-6 lines of code
from sklearn.neighbors import NearestNeighbors
model = NearestNeighbors(metric='euclidean', algorithm='brute').fit(word_count)
distances, indices = model.kneighbors(word_count[35817], n_neighbors=10)
indices
neighbors = pd.DataFrame({'distance':distances.flatten(), 'id':indices.flatten()}).set_index('id')
wiki.join(neighbors, on='id', how="right").sort_values(by='distance')[['name','distance']]
```
**Exercise 3-3 - Interpret the results above**
```
# Complete this cell
```
The 10 people are all politicians, but roughly half of them have rather tenuous links to Obama beyond the fact that they are politicians.
* Francisco Barrio is a Mexican politician and former governor of Chihuahua.
* Walter Mondale and Don Bonker are Democrats whose careers peaked in the late 1970s.
* Wynn Normington Hugh-Jones is a former British diplomat and Liberal Party official.
* Andy Anstett is a former politician in Manitoba, Canada.
**Exercise 3-4 - Display the most frequent words of the Barack Obama and Francisco Barrio pages**
To make highly important words easy to spot, the following function, which builds the `word_count` column, is provided.
```
def unpack_dict(matrix, map_index_to_word):
table = sorted(map_index_to_word, key=map_index_to_word.get)
data = matrix.data
indices = matrix.indices
indptr = matrix.indptr
num_doc = matrix.shape[0]
return [{k:v for k,v in zip([table[word_id] for word_id in indices[indptr[i]:indptr[i+1]] ],
data[indptr[i]:indptr[i+1]].tolist())} for i in range(num_doc) ]
# Complete this cell ~ 2 lines of code
wiki['word_count'] = unpack_dict(word_count, map_index_to_word)
wiki['word_count']
```
**Exercise 3-5 - Create a function `top_words` that displays the most frequent words of a given page**
```
# Complete this cell ~ 10 lines of code
def top_words(name):
"""
    Return the table of the most frequent words of a Wikipedia page from the dataset.
"""
row = wiki[wiki['name'] == name]
word_count_df = pd.DataFrame(row['word_count'].apply(pd.Series).stack(), columns=["count"]).droplevel(0)
word_count_df.index.name = 'word'
return word_count_df.sort_values(by='count', ascending=False)
obama_words = top_words('Barack Obama')
barrio_words = top_words('Francisco Barrio')
combined_words = obama_words.join(barrio_words, on='word', how="inner", lsuffix='_obama', rsuffix='_barrio')
combined_words.head(10)
```
## 4 - Nearest neighbour search with the TF-IDF representation
**Exercise 4 - Repeat the steps of the Part 3 exercises, this time using the TF-IDF representation. Compare with the results obtained with the word-count representation**
```
# Complete this cell ~ 14-20 lines of code
# Load the TF-IDF representations
tf_idf = load_sparse_csr('../../data/people/people_wiki_tf_idf.npz')
# Search for the 10 nearest neighbours
model_tf_idf = NearestNeighbors(metric='euclidean', algorithm='brute').fit(tf_idf)
distances, indices = model_tf_idf.kneighbors(tf_idf[35817], n_neighbors=10)
# Build the results data frame
neighbors = pd.DataFrame({'distance':distances.flatten(), 'id':indices.flatten()}).set_index('id')
wiki.join(neighbors, on='id', how='right').sort_values(by='distance')[['name','distance']]
# Display the most significant words of the two pages
wiki['tf_idf'] = unpack_dict(tf_idf, map_index_to_word)
def top_words_tf_idf(name):
row = wiki[wiki['name'] == name]
tf_idf_df = pd.DataFrame(row['tf_idf'].apply(pd.Series).stack(), columns=["weight"]).droplevel(0)
tf_idf_df.index.name = 'word'
return tf_idf_df.sort_values(by='weight', ascending=False)
obama_words = top_words_tf_idf('Barack Obama')
barrio_words = top_words_tf_idf('Francisco Barrio')
combined_words = obama_words.join(barrio_words, on='word', how="inner", lsuffix='_obama', rsuffix='_barrio')
combined_words.head(10)
```
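One observation worth adding (not part of the original lab): Euclidean distance is sensitive to the norm of the TF-IDF vectors, and hence to document length, whereas cosine distance compares directions only. scikit-learn's `NearestNeighbors` accepts `metric='cosine'` with the brute-force algorithm, so the comparison is a one-line change:
```python
# Hypothetical variant: cosine distance instead of Euclidean on the TF-IDF vectors.
model_cos = NearestNeighbors(metric='cosine', algorithm='brute').fit(tf_idf)
distances, indices = model_cos.kneighbors(tf_idf[35817], n_neighbors=10)
neighbors = pd.DataFrame({'distance': distances.flatten(), 'id': indices.flatten()}).set_index('id')
wiki.join(neighbors, on='id', how='right').sort_values(by='distance')[['name', 'distance']]
```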
### End of lab 02-02-A1