```
# MODIFY!
# use RobustScaler!
model_name = 'rf-wo'
```
# Import Libraries & Data
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('./data/d-wo-ns.csv')
X = df.drop('throughput',axis=1)
y = df['throughput']
```
---
# Scale Data
```
# Split the data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# Scale the data
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
```
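The header cell asks for `RobustScaler`, which centres each feature on its median and scales it by its interquartile range. If you want to inspect what was fitted, a small optional check (not part of the original notebook):
```
# Optional: inspect the statistics RobustScaler fitted on the training data.
print(scaler.center_[:5])   # per-feature medians (first five features)
print(scaler.scale_[:5])    # per-feature interquartile ranges (first five features)
```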
# Determine Hyperparameters
## Determine Max Features
```
# MODIFY!
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(warm_start=True)
model
p_grid = {
'max_features':['auto', 'sqrt', 'log2']
}
from sklearn.model_selection import GridSearchCV
grid_model = GridSearchCV(
estimator=model,
param_grid =p_grid,
scoring='neg_mean_squared_error',
cv=10,
verbose=1
)
grid_model.fit(X_train,y_train)
grid_model.best_params_
hp = pd.Series(name=f'{model_name} HP', data=grid_model.best_params_)
hp
hp.to_csv(f'./hyperparameters/{model_name}.csv')
```
## Determine Number of Estimators
```
# MODIFY!
import time
estimators = [5,10,25,50,100,500,1000,2000]
scores_list = []
from sklearn.ensemble import RandomForestRegressor
for e in estimators:
    start = time.time()
    print(f'Starting {e} estimators')
    score_model = RandomForestRegressor(
        n_estimators=e,
        max_features=grid_model.best_params_['max_features']
    )
    from sklearn.model_selection import cross_validate
    scores = cross_validate(
        score_model,
        X_train,
        y_train,
        scoring=[
            'neg_mean_absolute_error',
            'neg_mean_squared_error',
            'neg_root_mean_squared_error'
        ],
        cv=10
    )
    scores = pd.DataFrame(scores)
    scores
    mean_scores = scores.mean()
    scores_list.append(mean_scores)
    print(f'Done with {e} estimators')
    end = time.time()
    print(f'Elapsed time: {end - start}')
    print('\n')
scores_df = pd.DataFrame(scores_list)
scores_df
scores_df['Est'] = estimators
scores_df
```
# Export Scores for Number of Estimators
```
# mean_scores
scores_df = scores_df.rename(columns={
'fit_time':'Fit Time',
'score_time':'Score Time',
'test_neg_mean_absolute_error':'MAE',
'test_neg_mean_squared_error':'MSE',
'test_neg_root_mean_squared_error':'RMSE'
})
scores_df
scores_df[['MAE','MSE','RMSE']] = scores_df[['MAE','MSE','RMSE']].apply(lambda x : abs(x))
scores_df
# we go for 25 estimators. Note that from 25 -> 50 estimators the error metrics worsen
scores_df.to_csv(f'./est-scores/{model_name}-est.csv')
pct_scores_df = scores_df.pct_change()
pct_scores_df
pct_scores_df.style.background_gradient()
# we choose index 4, i.e. 100 estimators <-
# score time decreases from index 3->4 but increases dramatically from 4->5
# the decreases in MSE & RMSE from 4->5 are insubstantial compared to 3->4
```
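To make the selection above easier to re-run when the table changes, here is a minimal sketch that encodes the criterion from the comments in code; the 1% threshold is an assumption and will not necessarily reproduce the manual choice of 100 estimators:
```
# Sketch: pick the smallest estimator count once the marginal RMSE improvement
# drops below a chosen threshold (1% here, which is an assumption).
rmse_improvement = -pct_scores_df['RMSE']             # positive = RMSE decreased vs. previous row
small_gain = rmse_improvement.fillna(1.0) < 0.01      # first row has no previous value to compare to
candidates = scores_df.loc[small_gain, 'Est']
chosen_est = int(candidates.iloc[0]) if not candidates.empty else int(scores_df['Est'].iloc[-1])
chosen_est
```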
# Score Model
```
# MODIFY!
# adopt number of estimators from the previous section
score_model = RandomForestRegressor(n_estimators=100,warm_start=True)
score_model
from sklearn.model_selection import cross_validate
scores = cross_validate(
score_model,
X_train,
y_train,
scoring=[
'neg_mean_absolute_error',
'neg_mean_squared_error',
'neg_root_mean_squared_error'
],
cv=10
)
scores = pd.DataFrame(scores)
scores
mean_scores = scores.mean()
mean_scores
```
# Export Model Scores
```
mean_scores = mean_scores.rename(f'{model_name}')
mean_scores.iloc[[2, 3, 4]] = mean_scores.iloc[[2, 3, 4]].abs()
# mean_scores
mean_scores = mean_scores.rename({
'fit_time':'Fit Time',
'score_time':'Score Time',
'test_neg_mean_absolute_error':'MAE',
'test_neg_mean_squared_error':'MSE',
'test_neg_root_mean_squared_error':'RMSE'
})
mean_scores
mean_scores['STD FT'] = scores.fit_time.std()
mean_scores['STD ST'] = scores.score_time.std()
mean_scores['STD MAE'] = scores.test_neg_mean_absolute_error.std()
mean_scores['STD MSE'] = scores.test_neg_mean_squared_error.std()
mean_scores['STD RMSE'] = scores.test_neg_root_mean_squared_error.std()
mean_scores
mean_scores.to_csv(f'./scores/{model_name}.csv')
```
# Holdout Test
```
score_model.fit(X_train,y_train)
y_holdout = score_model.predict(X_test)
sns.histplot(y_holdout,bins=40,kde=True)
fig,ax = plt.subplots()
sns.kdeplot(y_test, ax=ax, shade=True, label='Observations')
sns.kdeplot(y_holdout, ax=ax,shade=True,label='Predictions')
ax.legend(loc='best')
fig,ax = plt.subplots()
sns.ecdfplot(y_test, ax=ax, label='Observations')
sns.ecdfplot(y_holdout, ax=ax,label='Predictions')
plt.axvline(x=y_test.mean(),color='grey',linestyle='--')
ax.legend(loc='best')
from sklearn.metrics import mean_absolute_error, mean_squared_error
mae = mean_absolute_error(y_test,y_holdout)
mse = mean_squared_error(y_test,y_holdout)
rmse = np.sqrt(mse)
err_df = pd.DataFrame(data=[mae, mse, rmse],index=['MAE','MSE','RMSE'],columns=[f'{model_name}'])
err_df
err_df.to_csv(f'./holdout-test/{model_name}.csv')
# mean_scores.to_csv(f'./opt-model-err/{model_name}-err.csv')
res = y_test - y_holdout
res.describe().drop('count')
sns.histplot(data=res, kde=True,bins=40)
ax = sns.scatterplot(x=y_test, y=res)
ax.set(ylabel='Residuals', xlabel='Test Label')
plt.axhline(y=0,color='red',linestyle='--')
# there should be no clear pattern / curve in the plot
# we see a positive correlation between Test Label and Residuals -> later models should avoid this pattern
import scipy as sp
fig, ax = plt.subplots()
sp.stats.probplot(res,plot=ax);
```
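As a quick numeric companion to the residual-plot comments above, a small sketch that quantifies the correlation between the test labels and the residuals (it uses `scipy.stats.pearsonr`, which the original notebook does not import):
```
# Quantify the residual pattern noted above with a Pearson correlation.
from scipy.stats import pearsonr
r, p_value = pearsonr(y_test, res)
print(f'Correlation between test labels and residuals: r={r:.3f} (p={p_value:.3g})')
```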
# Export Optimized Model
```
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
scaler.fit(X)
# MODIFY!
X = scaler.transform(X)
op_model = score_model
op_model.fit(X,y)
y_pred = op_model.predict(X)
sns.histplot(y_pred,bins=40,kde=True)
fig,ax = plt.subplots()
sns.kdeplot(y, ax=ax, shade=True, label='Observations')
sns.kdeplot(y_pred, ax=ax,shade=True,label='Predictions')
ax.legend(loc='best')
fig,ax = plt.subplots()
sns.ecdfplot(y, ax=ax, label='Observations')
sns.ecdfplot(y_pred, ax=ax,label='Predictions')
plt.axvline(x=y.mean(),color='grey',linestyle='--')
ax.legend(loc='best')
mae = mean_absolute_error(y,y_pred)
mse = mean_squared_error(y,y_pred)
rmse = np.sqrt(mse)
err_df = pd.DataFrame(data=[mae, mse, rmse],index=['MAE','MSE','RMSE'],columns=[f'{model_name}'])
err_df
err_df.to_csv(f'./model-err/{model_name}.csv')
# mean_scores.to_csv(f'./opt-model-err/{model_name}-err.csv')
from joblib import dump, load
dump(op_model, f'./opt-models/{model_name}.joblib')
res = y - y_pred
res.describe().drop('count')
sns.histplot(data=res, kde=True,bins=40)
ax = sns.scatterplot(x=y, y=res)
ax.set(ylabel='Residuals', xlabel='Label')
plt.axhline(y=0,color='red',linestyle='--')
# there should be no clear pattern / curve in the plot
# we see a positive correlation between the labels and the residuals -> later models should avoid this pattern
import scipy as sp
fig, ax = plt.subplots()
sp.stats.probplot(res,plot=ax);
```
DONE!
<a href="https://colab.research.google.com/github/unverciftci/Python_Programlama/blob/main/chapters/chapter1_basics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Introduction to Programming with Python
# Chapter 1: Python Basics
## Chapter Learning Objectives
<hr>
- Create, describe and differentiate standard Python datatypes such as `int`, `float`, `string`, `list`, `dict`, `tuple`, etc.
- Perform arithmetic operations like `+`, `-`, `*`, `**` on numeric values.
- Perform basic string operations like `.lower()`, `.split()` to manipulate strings.
- Compute boolean values using comparison operators (`==`, `!=`, `>`, etc.) and boolean operators (`and`, `or`, `not`).
- Assign, index, slice and subset values to and from tuples, lists, strings and dictionaries.
- Write a conditional statement with `if`, `elif` and `else`.
- Identify code blocks by levels of indentation.
- Explain the difference between mutable objects like a `list` and immutable objects like a `tuple`.
## 1. Introduction
<hr>
Python is a general-purpose programming language, and therefore has a very wide range of uses. We will only cover the topics that are sufficient for data science and applied mathematics. This chapter introduces the most fundamental components of the Python programming language.
## 2. Basic Python Data Types
<hr>
A **value** is a piece of data that a computer program works with such as a number or text. There are different **types** of values: `42` is an integer and `"Hello!"` is a string. A **variable** is a name that refers to a value. In mathematics and statistics, we usually use variable names like $x$ and $y$. In Python, we can use any word as a variable name as long as it starts with a letter or an underscore. However, it should not be a [reserved word](https://docs.python.org/3.3/reference/lexical_analysis.html#keywords) in Python such as `for`, `while`, `class`, `lambda`, etc. as these words encode special functionality in Python that we don't want to overwrite!
It can be helpful to think of a variable as a box that holds some information (a single number, a vector, a string, etc). We use the **assignment operator** `=` to assign a value to a variable.

Image modified from: [medium.com](https://www.google.com/url?sa=i&url=https%3A%2F%2Fmedium.com%2F%40stevenpcurtis.sc%2Fwhat-is-a-variable-3447ac1331b9&psig=AOvVaw3YbYfgb7XFOJ_sHP5eliob&ust=1595365663851000&source=images&cd=vfe&ved=0CA0QjhxqFwoTCMi8nrfe3OoCFQAAAAAdAAAAABAZ)
```{tip}
See the [Python 3 documentation](https://docs.python.org/3/library/stdtypes.html) for a summary of the standard built-in Python datatypes.
```
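As a quick illustration of assignment before the type-by-type tour below (a minimal sketch; the names are just examples):
```
# Assign values to variables with the assignment operator `=`.
x = 42           # an int
_count = 3.14    # names may start with an underscore
# Reserved words can't be used as names; uncommenting the next line raises a SyntaxError:
# lambda = 5
x
```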
### Built-in Data Types
| English name | Type name | Type class | Description | Example |
| :-------------------- | :--------- | :------------- | :-------------------------------------------- | :----------------------------------------- |
| integer | `int` | Numeric Type | positive or negative whole number | `42` |
| floating point number | `float` | Numeric Type | real number with a decimal part | `3.14159` |
| boolean | `bool` | Boolean Values | true or false | `True` |
| string | `str` | Sequence Type | text | `"I Can Has Cheezburger?"` |
| list | `list` | Sequence Type | ordered, mutable collection of objects | `['Ali', 'Xinyi', 'Miriam']` |
| tuple | `tuple` | Sequence Type | ordered, immutable collection of objects | `('Thursday', 6, 9, 2018)` |
| dictionary | `dict` | Mapping Type | mapping of key-value pairs | `{'name':'DSCI', 'code':511, 'credits':2}` |
| none | `NoneType` | Null Object | represents no value | `None` |
### Numeric Data Types
There are three distinct numeric types: `integers`, `floating point numbers`, and `complex numbers` (not covered here). We can determine the type of an object in Python using `type()`. We can print the value of the object using `print()`.
```
x = 42
type(x)
print(x)
```
In Jupyter/IPython (an interactive version of Python), the last line of a cell will automatically be printed to screen so we don't actually need to explicitly call `print()`.
```
x # Anything after the pound/hash symbol is a comment and will not be run
pi = 3.14159
pi
type(pi)
```
### Arithmetic Operators
Below is a table of the syntax for the arithmetic operators commonly used in Python:
| Operator | Description |
| :------: | :--------------: |
| `+` | addition |
| `-` | subtraction |
| `*` | multiplication |
| `/` | division |
| `**` | exponentiation |
| `//` | integer (floor) division |
| `%` | modulo (remainder) |
Let's have a go at applying these operators to numeric types and observe the results.
```
1 + 2 + 3 + 4 + 5 # add
2 * 3.14159 # multiply
2 ** 10 # exponent
```
Division may produce a different type than expected: it will change `int` to `float`.
```
int_2 = 2
type(int_2)
int_2 / int_2 # divison
type(int_2 / int_2)
```
But the syntax `//` allows us to do "integer division" (aka "floor division") and retain the `int` data type; it always rounds down.
```
101 / 2
101 // 2 # "floor division" - always rounds down
```
We refer to this as "integer division" or "floor division" because, for positive numbers, it's like calling `int` on the result of a division, which rounds down to the nearest integer, or "floors" the result.
```
int(101 / 2)
```
The `%` "modulo" operator gives us the remainder after division.
```
100 % 2 # "100 mod 2", or the remainder when 100 is divided by 2
101 % 2 # "101 mod 2", or the remainder when 101 is divided by 2
100.5 % 2
```
### None
`NoneType` is its own type in Python. It only has one possible value, `None` - it represents an object with no value. We'll see it again in a later chapter.
```
x = None
print(x)
type(x)
```
### Strings
Text is stored as a data type called a `string`. We can think of a string as a sequence of characters.
```{tip}
Actually they are a sequence of Unicode code points. Here's a [great blog post](https://www.joelonsoftware.com/2003/10/08/the-absolute-minimum-every-software-developer-absolutely-positively-must-know-about-unicode-and-character-sets-no-excuses/) on Unicode if you're interested.
```
We write strings as characters enclosed with either:
- single quotes, e.g., `'Hello'`
- double quotes, e.g., `"Goodbye"`
There's no difference between the two methods, but there are cases where having both is useful (more on that below)! We also have triple double quotes, which are typically used for function documentation (more on that in a later chapter), e.g., `"""This function adds two numbers"""`.
```
my_name = "Tomas Beuzen"
my_name
type(my_name)
course = 'DSCI 511'
course
type(course)
```
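As a small aside on the triple-quoted strings mentioned above, a hypothetical example of a docstring (function definitions are covered properly in a later chapter):
```
def add(a, b):
    """This function adds two numbers."""  # a triple-quoted docstring
    return a + b

add.__doc__
```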
If the string contains a quotation or apostrophe, we can use a combination of single and double quotes to define the string.
```
sentence = "It's a rainy day."
sentence
type(sentence)
quote = 'Donald Knuth: "Premature optimization is the root of all evil."'
quote
```
### Boolean
The Boolean (`bool`) type has two values: `True` and `False`.
```
the_truth = True
the_truth
type(the_truth)
lies = False
lies
type(lies)
```
### Comparison Operators
We can compare objects using comparison operators, and we'll get back a Boolean result:
| Operator | Description |
| :-------- | :----------------------------------- |
| `x == y ` | is `x` equal to `y`? |
| `x != y` | is `x` not equal to `y`? |
| `x > y` | is `x` greater than `y`? |
| `x >= y` | is `x` greater than or equal to `y`? |
| `x < y` | is `x` less than `y`? |
| `x <= y` | is `x` less than or equal to `y`? |
| `x is y` | is `x` the same object as `y`? |
```
2 < 3
"Deep learning" == "Solve all the world's problems"
2 != "2"
2 is 2
2 == 2.0
```
### Boolean Operators
We also have so-called "boolean operators", which also evaluate to either `True` or `False`:
| Operator | Description |
| :---: | :--- |
|`x and y`| are `x` and `y` both True? |
|`x or y` | is at least one of `x` and `y` True? |
| `not x` | is `x` False? |
```
True and True
True and False
True or False
False or False
("Python 2" != "Python 3") and (2 <= 3)
True
not True
not not True
```
```{note}
Python also has [bitwise operators](https://wiki.python.org/moin/BitwiseOperators) like `&` and `|`. Bitwise operators literally compare the bits of two integers. That's beyond the scope of this course but I've included a code snippet below to show you them in action.
```
```
print(f"Bit representation of the number 5: {5:0b}")
print(f"Bit representation of the number 4: {4:0b}")
print(f" ↓↓↓")
print(f" {5 & 4:0b}")
print(f" ↓ ")
print(f" {5 & 4}")
```
### Casting
Sometimes we need to explicitly **cast** a value from one type to another. We can do this using functions like `str()`, `int()`, and `float()`. Python tries to do the conversion, or throws an error if it can't.
```
x = 5.0
type(x)
x = int(5.0)
x
type(x)
x = str(5.0)
x
type(x)
str(5.0) == 5.0
int(5.3)
float("hello")
```
## 3. Lists and Tuples
<hr>
Lists and tuples allow us to store multiple things ("elements") in a single object. The elements are _ordered_ (we'll explore what that means a little later). We'll start with lists. Lists are defined with square brackets `[]`.
```
my_list = [1, 2, "THREE", 4, 0.5]
my_list
type(my_list)
```
Lists can hold any datatype - even other lists!
```
another_list = [1, "two", [3, 4, "five"], True, None, {"key": "value"}]
another_list
```
You can get the length of the list with the function `len()`:
```
len(my_list)
```
Tuples look similar to lists but have a key difference (they are immutable - but more on that a bit later). They are defined with parentheses `()`.
```
today = (1, 2, "THREE", 4, 0.5)
today
type(today)
len(today)
```
### Indexing and Slicing Sequences
We can access values inside a list, tuple, or string using square bracket syntax. Python uses *zero-based indexing*, which means the first element of the list is in position 0, not position 1.
```
my_list
my_list[0]
my_list[2]
len(my_list)
my_list[5]
```
We can use negative indices to count backwards from the end of the list.
```
my_list
my_list[-1]
my_list[-2]
```
We can use the colon `:` to access a sub-sequence. This is called "slicing".
```
my_list[1:3]
```
Note from the above that the start of the slice is inclusive and the end is exclusive. So `my_list[1:3]` fetches elements 1 and 2, but not 3.
Strings behave the same as lists and tuples when it comes to indexing and slicing. Remember, we think of them as a *sequence* of characters.
```
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet[0]
alphabet[-1]
alphabet[-3]
alphabet[:5]
alphabet[12:20]
```
### List Methods
A list is an object and it has methods for interacting with its data. A method is like a function, it performs some operation with the data, but a method differs to a function in that it is defined on the object itself and accessed using a period `.`. For example, `my_list.append(item)` appends an item to the end of the list called `my_list`. You can see the documentation for more [list methods](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists).
```
primes = [2, 3, 5, 7, 11]
primes
len(primes)
primes.append(13)
primes
```
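A few more list methods from the documentation, shown on the same list (a small illustrative sketch):
```
primes.pop()            # removes and returns the last element (13)
primes.insert(0, 1)     # insert 1 at the front (1 is not prime, just an example)
primes.remove(1)        # remove the first occurrence of 1
primes.sort(reverse=True)
primes
```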
### Sets
Another built-in Python data type is the `set`, which stores an _unordered_ collection of _unique_ items. Being unordered, sets do not record element position or order of insertion and so do not support indexing.
```
s = {2, 3, 5, 11}
s
{1, 2, 3} == {3, 2, 1}
[1, 2, 3] == [3, 2, 1]
s.add(2) # does nothing
s
s[0]
```
Above: this throws an error because set elements are not ordered and can't be indexed.
### Mutable vs. Immutable Types
Strings and tuples are immutable types, which means they can't be modified. Lists are mutable, and we can assign new values to their entries. This is the main difference between lists and tuples.
```
names_list = ["Indiana", "Fang", "Linsey"]
names_list
names_list[0] = "Cool guy"
names_list
names_tuple = ("Indiana", "Fang", "Linsey")
names_tuple
names_tuple[0] = "Not cool guy"
```
The same goes for strings. Once defined, we cannot modify the characters of a string.
```
my_name = "Tom"
my_name[-1] = "q"
x = ([1, 2, 3], 5)
x[1] = 7
x
x[0][1] = 4
x
```
## 4. String Methods
<hr>
There are various useful string methods in Python.
```
all_caps = "HOW ARE YOU TODAY?"
all_caps
new_str = all_caps.lower()
new_str
```
Note that the method lower doesn't change the original string but rather returns a new one.
```
all_caps
```
There are *many* string methods. Check out the [documentation](https://docs.python.org/3/library/stdtypes.html#string-methods).
```
all_caps.split()
all_caps.count("O")
```
One can explicitly cast a string to a list:
```
caps_list = list(all_caps)
caps_list
"".join(caps_list)
"-".join(caps_list)
```
We can also chain multiple methods together (more on this when we get to NumPy and Pandas in later chapters):
```
"".join(caps_list).lower().split(" ")
```
### String formatting
Python has ways of creating strings by "filling in the blanks" and formatting them nicely. This is helpful for when you want to print statements that include variables or expressions. There are a few ways of doing this but I use and recommend [f-strings](https://docs.python.org/3.6/whatsnew/3.6.html#whatsnew36-pep498) which were introduced in Python 3.6. All you need to do is put the letter "f" at the front of your string and then you can include variables with curly-bracket notation `{}`.
```
name = "Newborn Baby"
age = 4 / 12
day = 10
month = 6
year = 2020
template_new = f"Hello, my name is {name}. I am {age:.2f} years old. I was born {day}/{month:02}/{year}."
template_new
```
```{note}
In the code above, the notation after the colon in my curly braces is for formatting. For example, `:.2f` means, print this variable with 2 decimal places. See format code options [here](https://docs.python.org/3.4/library/string.html#format-specification-mini-language).
```
## 5. Dictionaries
<hr>
A dictionary is a mapping between key-values pairs and is defined with curly-brackets:
```
house = {
"bedrooms": 3,
"bathrooms": 2,
"city": "Vancouver",
"price": 2499999,
"date_sold": (1, 3, 2015),
}
condo = {
"bedrooms": 2,
"bathrooms": 1,
"city": "Burnaby",
"price": 699999,
"date_sold": (27, 8, 2011),
}
```
We can access a specific field of a dictionary with square brackets:
```
house["price"]
condo["city"]
```
We can also edit dictionaries (they are mutable):
```
condo["price"] = 5 # price already in the dict
condo
condo["flooring"] = "wood"
condo
```
We can also delete fields entirely (though I rarely use this):
```
del condo["city"]
condo
```
And we can easily add fields:
```
condo[5] = 443345
condo
```
Keys may be any immutable data type, even a `tuple`!
```
condo[(1, 2, 3)] = 777
condo
```
You'll get an error if you try to access a non-existent key:
```
condo["not-here"]
```
## 6. Empties
Sometimes you'll want to create empty objects that will be filled later on.
```
lst = list() # empty list
lst
lst = [] # empty list
lst
```
There's no real difference between the two methods above, `[]` is apparently [marginally faster](https://stackoverflow.com/questions/2972212/creating-an-empty-list-in-python)...
```
tup = tuple() # empty tuple
tup
tup = () # empty tuple
tup
dic = dict() # empty dict
dic
dic = {} # empty dict
dic
st = set() # empty set
st
```
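If you want to check the "marginally faster" claim yourself, a rough micro-benchmark with the standard library's `timeit` (results vary by machine; this is just a sketch):
```
import timeit
# Time one million constructions of an empty list with each syntax.
print(timeit.timeit("list()", number=1_000_000))
print(timeit.timeit("[]", number=1_000_000))
```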
## 7. Conditionals
<hr>
[Conditional statements](https://docs.python.org/3/tutorial/controlflow.html) allow us to write programs where only certain blocks of code are executed depending on the state of the program. Let's look at some examples and take note of the keywords, syntax and indentation.
```
name = "Tom"
if name.lower() == "tom":
    print("That's my name too!")
elif name.lower() == "santa":
    print("That's a funny name.")
else:
    print(f"Hello {name}! That's a cool name!")
print("Nice to meet you!")
```
The main points to notice:
- Use keywords `if`, `elif` and `else`
- The colon `:` ends each conditional expression
- Indentation (by 4 spaces) defines code blocks
- In an `if` statement, the first block whose conditional statement returns `True` is executed and the program exits the `if` block
- `if` statements don't necessarily need `elif` or `else`
- `elif` lets us check several conditions
- `else` lets us evaluate a default block if all other conditions are `False`
- the end of the entire `if` statement is where the indentation returns to the same level as the first `if` keyword
If statements can also be **nested** inside of one another:
```
name = "Super Tom"
if name.lower() == "tom":
    print("That's my name too!")
elif name.lower() == "santa":
    print("That's a funny name.")
else:
    print(f"Hello {name}! That's a cool name.")
    if name.lower().startswith("super"):
        print("Do you really have superpowers?")
print("Nice to meet you!")
```
### Inline if/else
We can write simple `if` statements "inline", i.e., in a single line, for simplicity.
```
words = ["the", "list", "of", "words"]
x = "long list" if len(words) > 10 else "short list"
x
if len(words) > 10:
    x = "long list"
else:
    x = "short list"
x
```
### Truth Value Testing
Any object can be tested for "truth" in Python, for use in `if` and `while` (next chapter) statements.
- `True` values: all objects evaluate to `True` unless they are one of the `False` values below
- `False` values: `None`, `False`, zero of any numeric type (e.g., `0`, `0.0`), and empty sequences and collections: `''`, `()`, `[]`, `{}`, `set()`
```{tip}
Read more in the [docs here](https://docs.python.org/3/library/stdtypes.html#truth-value-testing).
```
```
x = 1
if x:
    print("I'm truthy!")
else:
    print("I'm falsey!")
x = False
if x:
    print("I'm truthy!")
else:
    print("I'm falsey!")
x = []
if x:
    print("I'm truthy!")
else:
    print("I'm falsey!")
```
### Short-circuiting
Python supports a concept known as "short-circuiting": evaluation of a boolean expression stops automatically as soon as its truth value has been determined.
```
fake_variable # not defined
True or fake_variable
True and fake_variable
False and fake_variable
```
|Expression|Result|Detail|
|---|---|---|
|A or B|If A is `True` then A else B|B only executed if A is `False`|
|A and B|If A is `False` then A else B|B only executed if A is `True`|
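Short-circuiting is useful in practice for guarding an expression that would otherwise raise an error; a minimal sketch (the values are just examples):
```
numerator, denominator = 1, 0
# The division on the right is only evaluated when the denominator is non-zero,
# so no ZeroDivisionError is raised.
if denominator != 0 and numerator / denominator > 0.5:
    print("More than half")
else:
    print("Half or less (or undefined)")
```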
## PySAL Change Log Statistics
This notebook generates the summary statistics for a package.
It assumes you are running it from the `tools` directory at the top level of the package.
## Change the values only in the next cell
```
package_name = 'spglm'
release_date = '2020-9-8'
start_date = '2019-7-18'
```
This notebook will generate a file in the current directory with the name "changelog_VERSION.md". You can edit it and prepend it to the front of the CHANGELOG file for the package release.
```
from __future__ import print_function
import os
import json
import re
import sys
import pandas
from datetime import datetime, timedelta
from time import sleep
from subprocess import check_output
try:
    from urllib import urlopen
except:
    from urllib.request import urlopen
import ssl
import yaml
context = ssl._create_unverified_context()
CWD = os.path.abspath(os.path.curdir)
CWD
since_date = '--since="{start}"'.format(start=start_date)
since_date
since = datetime.strptime(start_date+" 0:0:0", "%Y-%m-%d %H:%M:%S")
since
# get __version__
f = "../{package}/__init__.py".format(package=package_name)
with open(f, 'r') as initfile:
    exec(initfile.readline())
```
## Total commits by subpackage
```
cmd = ['git', 'log', '--oneline', since_date]
ncommits = len(check_output(cmd).splitlines())
ncommits
```
## List Contributors
Some of our contributors have many aliases for the same identity. So, we've added a mapping to make sure that individuals are listed once (and only once).
```
identities = {'Levi John Wolf': ('ljwolf', 'Levi John Wolf'),
'Serge Rey': ('Serge Rey', 'Sergio Rey', 'sjsrey', 'serge'),
'Wei Kang': ('Wei Kang', 'weikang9009'),
'Dani Arribas-Bel': ('Dani Arribas-Bel', 'darribas')
}
def regularize_identity(string):
    string = string.decode()
    for name, aliases in identities.items():
        for alias in aliases:
            if alias in string:
                string = string.replace(alias, name)
    if len(string.split(' '))>1:
        string = string.title()
    return string.lstrip('* ')
author_cmd = ['git', 'log', '--format=* %aN', since_date]
from collections import Counter
ncommits = len(check_output(cmd).splitlines())
all_authors = check_output(author_cmd).splitlines()
counter = Counter([regularize_identity(author) for author in all_authors])
# global_counter += counter
# counters.update({'.'.join((package,subpackage)): counter})
unique_authors = sorted(set(all_authors))
unique_authors = counter.keys()
unique_authors
```
## Disaggregate by PR, Issue
```
from datetime import datetime, timedelta
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def parse_link_header(headers):
    link_s = headers.get('link', '')
    urls = element_pat.findall(link_s)
    rels = rel_pat.findall(link_s)
    d = {}
    for rel,url in zip(rels, urls):
        d[rel] = url
    return d
def get_paged_request(url):
    """get a full list, handling APIv3's paging"""
    results = []
    while url:
        #print("fetching %s" % url, file=sys.stderr)
        f = urlopen(url)
        results.extend(json.load(f))
        links = parse_link_header(f.headers)
        url = links.get('next')
    return results
def get_issues(project="pysal/pysal", state="closed", pulls=False):
    """Get a list of the issues from the Github API."""
    which = 'pulls' if pulls else 'issues'
    url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE)
    return get_paged_request(url)
def _parse_datetime(s):
    """Parse dates in the format returned by the Github API."""
    if s:
        return datetime.strptime(s, ISO8601)
    else:
        return datetime.fromtimestamp(0)
def issues2dict(issues):
    """Convert a list of issues to a dict, keyed by issue number."""
    idict = {}
    for i in issues:
        idict[i['number']] = i
    return idict
def is_pull_request(issue):
    """Return True if the given issue is a pull request."""
    return 'pull_request_url' in issue
def issues_closed_since(period=timedelta(days=365), project="pysal/pysal", pulls=False):
    """Get all issues closed since a particular point in time. period
    can either be a datetime object, or a timedelta object. In the
    latter case, it is used as a time before the present."""
    which = 'pulls' if pulls else 'issues'
    if isinstance(period, timedelta):
        period = datetime.now() - period
    url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE)
    allclosed = get_paged_request(url)
    # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)
    filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period]
    # exclude rejected PRs
    if pulls:
        filtered = [ pr for pr in filtered if pr['merged_at'] ]
    return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
    """Return a list of issues sorted by closing date."""
    return sorted(issues, key = lambda i:i[field], reverse=reverse)
def report(issues, show_urls=False):
    """Summary report about a list of issues, printing number and title.
    """
    # titles may have unicode in them, so we must encode everything below
    if show_urls:
        for i in issues:
            role = 'ghpull' if 'merged_at' in i else 'ghissue'
            print('* :%s:`%d`: %s' % (role, i['number'],
                                      i['title'].encode('utf-8')))
    else:
        for i in issues:
            print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))
all_issues = {}
all_pulls = {}
total_commits = 0
#prj='pysal/libpysal'
prj = 'pysal/{package}'.format(package=package_name)
issues = issues_closed_since(since, project=prj,pulls=False)
pulls = issues_closed_since(since, project=prj,pulls=True)
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
issue_listing = []
for issue in issues:
    entry = "{title} (#{number})".format(title=issue['title'],number=issue['number'])
    issue_listing.append(entry)
pull_listing = []
for pull in pulls:
    entry = "{title} (#{number})".format(title=pull['title'],number=pull['number'])
    pull_listing.append(entry)
pull_listing
message = "We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests".format(total=n_total, pr=n_pulls)
message = "{msg}, since our last release on {previous}.".format(msg=message, previous=str(start_date))
message
message += "\n\n## Issues Closed\n"
print(message)
issues = "\n".join([" - "+issue for issue in issue_listing])
message += issues
message += "\n\n## Pull Requests\n"
pulls = "\n".join([" - "+pull for pull in pull_listing])
message += pulls
print(message)
people = "\n".join([" - "+person for person in unique_authors])
print(people)
message +="\n\nThe following individuals contributed to this release:\n\n{people}".format(people=people)
print(message)
head = "# Changes\n\nVersion {version} ({release_date})\n\n".format(version=__version__, release_date=release_date)
print(head+message)
outfile = 'changelog_{version}.md'.format(version=__version__)
with open(outfile, 'w') as of:
    of.write(head+message)
```
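As the introduction notes, the generated file can be prepended to the package CHANGELOG. Below is a minimal sketch of doing that programmatically; the CHANGELOG path is an assumption and not part of this notebook:
```
# Sketch: prepend the freshly written release notes to the existing CHANGELOG.
# The path below is an assumption; adjust it to the package layout.
changelog_path = '../CHANGELOG.md'
with open(outfile) as new_file:
    new_text = new_file.read()
try:
    with open(changelog_path) as old_file:
        old_text = old_file.read()
except FileNotFoundError:
    old_text = ''
with open(changelog_path, 'w') as out:
    out.write(new_text + '\n' + old_text)
```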
# Predicting Future Prices
## Prediction target: the log rate of increase from the current price to the future price
$$y = \log{\frac{future\_price}{current\_price}} = \log{future\_price} - \log{current\_price}$$
We can either predict this quantity directly, or predict the future price and then compute the ratio.
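A tiny numeric check of this definition (the prices below are made-up example values, not taken from the dataset):
```
import numpy as np

current_price, future_price = 2.0, 2.5   # example values only
y = np.log(future_price / current_price)
# The two forms of the target agree.
assert np.isclose(y, np.log(future_price) - np.log(current_price))
y
```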
## Features used
- Image features (extracted with SwinTransformer)
- The price history of the most recent 50 transactions (encoded with an LSTM)
- The mean price of each collection
## Model architecture
- Extract image features with SwinTransformer (64 dimensions)
- Extract features from the price history
- Concatenate the two feature sets above with the mean collection price and predict with a machine-learning model (model choice to be decided)
### Results
- Linear regression: MAE 0.322
- LightGBM: MAE 0.246
- NN: MAE 0.275
```
from comet_ml import Experiment
import os
import sys
import gc
import warnings
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold, train_test_split, StratifiedKFold
from sklearn.linear_model import LinearRegression
from tqdm.notebook import tqdm
import lightgbm as lgb
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
import tensorflow.keras.losses as losses
import tensorflow.keras.optimizers as optim
import tensorflow.keras.activations as activations
import matplotlib.pyplot as plt
sys.path.append('../src')
sys.path.append('../Swin-Transformer-TF')
from ml_utils import *
from utils import *
from swintransformer import SwinTransformer
warnings.filterwarnings('ignore')
pd.options.display.max_columns = 200
pd.options.display.max_rows = 20
%matplotlib inline
TABLE_DIR = "../data/taskA/table"
en_df = pd.read_csv(os.path.join(TABLE_DIR, "ensemble_data.csv")) # includes features extracted from each image with SwinTransformer
en_df = en_df.drop('target', axis=1)
en_df = en_df.rename(columns={'price_0': 'target'})
en_df['target'] = en_df['target'].apply(lambda x: np.log1p(x))
print(f"ensemble data shape: {en_df.shape}")
en_df = en_df.query("price_1 != 0").reset_index(drop=True)
en_df = en_df[en_df['collection.name'] != 'Angry Apes United'] # this collection has only a single record
collection_means = en_df.groupby('collection.name')['price_1'].mean()
en_df['target_encoding'] = en_df['collection.name'].map(collection_means)
print(f"drop 1 transaction data: {en_df.shape}")
def create_model(len_seq: int = 49, input_dim: int = 1, output_dim: int = 1):
inputs = layers.Input(shape=(len_seq, input_dim))
lstm = layers.LSTM(64)(inputs)
outputs = layers.Dense(output_dim)(lstm)
model = models.Model(inputs=[inputs], outputs=[outputs])
model.compile(loss=losses.mean_squared_error, optimizer=optim.Adam())
return model
model = create_model()
model.summary()
features = [f"price_{i}" for i in range(1, 50)]
target = 'target'
X = en_df[features].values.reshape(-1, 49, 1)
y = en_df[target].values
model.fit(X, y, validation_split=0.1, epochs=20, batch_size=64)
base_model = models.Sequential(model.layers[:-1])
seq_features = base_model.predict(X)
seq_names = [f"seq_feature{i}" for i in range(64)]
en_df[seq_names] = seq_features
image_names = [f"image_feature{i}" for i in range(64)]
use_cols = seq_names + image_names + ['target_encoding']
target = 'target'
```
```
def eval_model(df, features, target, model, model_name):
kf = StratifiedKFold(n_splits=4, random_state=6174, shuffle=True)
rmse_scores = np.array([])
mae_scores = np.array([])
model_no = 0
models = {}
for train_idx, val_idx in kf.split(df, df['collection.name'].values):
train_df, val_df = df.iloc[train_idx], df.iloc[val_idx]
enc = train_df.groupby(['collection.name'])['target'].mean()
train_df['target_encoding'] = train_df['collection.name'].map(enc)
val_df['target_encoding'] = val_df['collection.name'].map(enc)
train_X, val_X = train_df[features].values, val_df[features].values
train_y, val_y = train_df[target].values, val_df[target].values
sc = StandardScaler()
train_X = sc.fit_transform(train_X)
val_X = sc.transform(val_X)
model.fit(train_X, train_y)
pred = model.predict(val_X)
rmse = np.sqrt(mean_squared_error(val_y, pred))
mae = mean_absolute_error(val_y, pred)
rmse_scores = np.append(rmse_scores, rmse)
mae_scores = np.append(mae_scores, mae)
models[f"model_{model_no}"] = model
model_no += 1
print(f"Use model: {model_name}")
print(f"RMSE Score: {rmse_scores.mean()}")
print(f"MAE Score: {mae_scores.mean()}")
return models
linear_models = eval_model(en_df, use_cols, target, LinearRegression(), "Linear Regression")
lgb_models = eval_model(en_df, use_cols, target, lgb.LGBMRegressor(), "Light GBM")
X = en_df[use_cols].values
pred = np.zeros(len(X))
for model in lgb_models.values():
pred += model.predict(StandardScaler().fit_transform(X))
en_df['pred'] = pred / 4
en_df[['target', 'pred']].describe()
en_df['real_rate'] = en_df['target'] - en_df['price_1'].apply(lambda x: np.log1p(x))
en_df['pred_rate'] = en_df['pred'] - en_df['price_1'].apply(lambda x: np.log1p(x))
en_df[['real_rate', 'pred_rate']].describe()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred'], en_df['pred'] - en_df['target'])
plt.title("residual plot(Light GBM)")
plt.grid(True)
plt.show()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred_rate'], en_df['pred_rate'] - en_df['real_rate'])
plt.title("rate residual plot(Light GBM)")
plt.grid(True)
plt.show()
X = en_df[use_cols].values
pred = np.zeros(len(X))
for model in linear_models.values():
pred += model.predict(StandardScaler().fit_transform(X))
en_df['pred_linear'] = pred / 4
en_df['pred_rate_linear'] = en_df['pred_linear'] - en_df['price_1'].apply(lambda x: np.log1p(x))
en_df[['target', 'pred_linear']].describe()
en_df[['real_rate', 'pred_rate_linear']].describe()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred_linear']*0.038 + en_df['pred']*0.962, en_df['pred_linear']*0.038 + en_df['pred']*0.962- en_df['target'])
plt.grid(True)
plt.title("redisual plot(linear + Light GBM)")
plt.show()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred_linear'], en_df['pred_linear'] - en_df['target'])
plt.grid(True)
plt.title("redisual plot(linear)")
plt.show()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred_rate_linear'], en_df['pred_rate_linear'] - en_df['real_rate'])
plt.grid(True)
plt.title("rate redisual plot(linear)")
plt.show()
def create_meta_nn(input_dim: int = 129, output_dim: int = 1):
inputs = layers.Input(shape=(input_dim))
dense1 = layers.Dense(64, activation=None)(inputs)
activation1 = layers.Activation(activations.gelu)(dense1)
dr1 = layers.Dropout(0.5)(activation1)
dense2 = layers.Dense(16, activation=None)(dr1)
activation2 = layers.Activation(activations.gelu)(dense2)
dr2 = layers.Dropout(0.3)(activation2)
outputs = layers.Dense(1)(dr2)
model = models.Model(inputs=[inputs], outputs=[outputs])
model.compile(loss=losses.mean_squared_error, optimizer=optim.Adam())
return model
meta_nn = create_meta_nn()
meta_nn.summary()
def eval_nn(df, features, target):
kf = StratifiedKFold(n_splits=4, random_state=6174, shuffle=True)
rmse_scores = np.array([])
mae_scores = np.array([])
model_no = 0
models = {}
for train_idx, val_idx in kf.split(df, df['collection.name'].values):
train_df, val_df = df.iloc[train_idx], df.iloc[val_idx]
enc = train_df.groupby(['collection.name'])['target'].mean()
train_df['target_encoding'] = train_df['collection.name'].map(enc)
val_df['target_encoding'] = val_df['collection.name'].map(enc)
train_X, val_X = train_df[features].values, val_df[features].values
train_y, val_y = train_df[target].values, val_df[target].values
model = create_meta_nn()
model.fit(train_X, train_y, epochs=50, batch_size=64, validation_data=(val_X, val_y))
pred = model.predict(val_X)
rmse = np.sqrt(mean_squared_error(val_y, pred))
mae = mean_absolute_error(val_y, pred)
rmse_scores = np.append(rmse_scores, rmse)
mae_scores = np.append(mae_scores, mae)
models[f"model_{model_no}"] = model
model_no += 1
print("Use model: Neural Network")
print(f"RMSE Score: {rmse_scores.mean()}")
print(f"MAE Score: {mae_scores.mean()}")
return models
nn_models = eval_nn(en_df, use_cols, target)
X = en_df[use_cols].values
pred = np.zeros((len(X), 1))
for model in nn_models.values():
pred += model.predict(StandardScaler().fit_transform(X))
en_df['pred_nn'] = pred / 4
en_df['pred_rate_nn'] = en_df['pred_nn'] - en_df['price_1'].apply(lambda x: np.log1p(x))
en_df[['real_rate', 'pred_rate_nn']].describe()
en_df[['target', 'pred_nn']].describe()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred_nn'], en_df['pred_nn'] - en_df['target'])
plt.title("residual plot(NN)")
plt.grid(True)
plt.show()
plt.figure(figsize=(16, 8))
plt.scatter(en_df['pred_rate_nn'], en_df['pred_rate_nn'] - en_df['real_rate'])
plt.title("rate residual plot(NN)")
plt.grid(True)
plt.show()
print(f"LGBM rate RMSE: {np.sqrt(mean_squared_error(en_df['real_rate'].values, en_df['pred_rate'].values))}")
print(f"LGBM rate MAE: {mean_absolute_error(en_df['real_rate'].values, en_df['pred_rate'].values)}")
print(f"Linear rate RMSE: {np.sqrt(mean_squared_error(en_df['real_rate'].values, en_df['pred_rate_linear'].values))}")
print(f"Linear rate MAE: {mean_absolute_error(en_df['real_rate'].values, en_df['pred_rate_linear'].values)}")
print(f"NN rate RMSE: {np.sqrt(mean_squared_error(en_df['real_rate'].values, en_df['pred_rate_nn'].values))}")
print(f"NN rate MAE: {mean_absolute_error(en_df['real_rate'].values, en_df['pred_rate_nn'].values)}")
```
# Analysis of Codeforces Contests
## Preparation
First we load pandas and prepare the data that we will need for the analysis.
```
import pandas as pd
import os.path
pd.options.display.max_rows = 20
%matplotlib inline
# load the tables
users_path = os.path.join("obdelani_podatki", "users.csv")
users = pd.read_csv(users_path, index_col='id')
tasks_path = os.path.join("obdelani_podatki", "tasks.csv")
tasks = pd.read_csv(tasks_path, index_col='id')
submissions_path = os.path.join("obdelani_podatki", "submissions.csv")
submissions = pd.read_csv(submissions_path, index_col='id')
contestants_path = os.path.join("obdelani_podatki", "contestants.csv")
contestants = pd.read_csv(contestants_path, index_col='id')
users
tasks
submissions
contestants
```
Before diving in, it is worth briefly explaining how contests on Codeforces work and what the data in the individual columns of the tables mean.
The Codeforces website regularly hosts contests in which programmers from all over the world compete. Each contest consists of several tasks that the contestants have to solve. They do so by writing the code that a task requires and submitting it; such a code submission is called a submission.
**Users**
This table contains the data of the competitors who took part in the collected contests. Since each of them may have participated in several contests, there is also a record in the *contestants* table for every user and every contest they attended.
For each user we have:
* the username
* the rank (based on the Codeforces rating)
* the country (there is no guarantee that the country data is truthful)
**Tasks**
For each task we store the contest in which it appeared and the task's index within that contest.
**Submissions**
For each submission we have the following data:
* the contestant who made the submission
* the task the submission belongs to
* the programming language
* the time elapsed since the start of the contest
**Contestants**
This table links contests and contestants/users.
Each entry in this table has:
* the user who participated in the contest
* the contest
* the placement achieved in the contest
The goal of this project is to confirm or refute the following **hypotheses**:
* The best contestants solve the first task in less than 10 minutes
* More than 80 % of all submissions are in c++ and less than 5 % in python
* Contestants who attended more contests are better than those who attended fewer
In addition, we will examine the following questions in somewhat more detail:
* Which countries do the best programmers come from?
* Is there a connection between how quickly a contestant solves easy tasks and how difficult the tasks they are able to solve are?
* How does participating in contests affect contest results?
## Where the best programmers come from
In this section we determine which countries the best contestants in the last 200 Codeforces contests come from.
First, let us look at how many contestants come from each country.
```
users_by_country = users.groupby("country").size().sort_values().tail(15)
users_by_country
users_by_country.plot.bar()
```
As expected, countries with larger populations have more contestants.
Surprisingly, the USA is only in eighth place, even though it is the third-largest country by population and a fairly developed one (no problems with internet access and the like).
For each user we will compute a score, which measures how well they performed in the contests they attended. We define a user's score as `201 - the average placement over all contests they attended`, so the score lies between 1 and 200; for example, a user whose placements were 1, 5 and 15 gets a score of 201 - 7 = 194. The score is shown in the last column of the following table.
```
avg_place = contestants.groupby("user", as_index=False)["place"].mean()
users = pd.merge(users, avg_place, left_on='id', right_on='user')
users["score"] = 201 - users.place
users = users.set_index("user")
users
```
Let us see what the data looks like if we restrict ourselves to the top 4000 users (roughly 10 % of all the users we analyze).
```
best_by_country = users.sort_values("score", ascending=False)[:4000].groupby("country").size().sort_values().tail(15)
best_by_country
best_by_country.plot.bar()
```
We can see that only a very small share of the contestants from India is among the top 4000 contestants when the contestants are ranked by the score defined above.
Let us try to define a better estimate of how good the contestants from a given country are. So far we have only compared the number of contestants from a country with the number of its contestants among the 4000 best by score. Since we already have the score of each contestant, we can define a country's score as the sum of the scores of all contestants from that country. From here to the end of this section we only consider countries with at least 30 contestants.
```
score_by_country = users.groupby("country", as_index=False)["score"].sum()
users_by_country = users.groupby("country", as_index=False).size().rename(columns={'size':'users'})
score_to_users = pd.merge(score_by_country, users_by_country, left_on='country', right_on='country')
score_to_users = score_to_users[score_to_users.users > 30]
score_to_users.plot.scatter("users", "score")
score_to_users
```
We observe that each country's score grows roughly linearly with the number of contestants from that country.
Nevertheless, some countries stand out. As we established earlier, India has a small share of really good programmers compared to other countries. In this plot India is the second point from the right, and we can see that its score is indeed low relative to its number of contestants.
Let us also look at the countries with the highest score per contestant.
```
score_to_users["relative_score"] = score_to_users["score"] / score_to_users["users"]
score_to_users = score_to_users.sort_values("relative_score", ascending=True)
score_to_users.plot.bar(x="country", y="relative_score")
```
The chart shows which countries are more successful and which are less so. India is among the less successful ones, as the data already suggested earlier.
By our criterion the most successful countries are Turkey, Canada and Armenia. Among the European countries France ranks highest, followed by Romania and Italy.
We should keep in mind that this data is not ideal for analyzing how good people from different countries are at programming tasks, since it only contains the top 200 contestants of each contest.
## The most popular programming languages
It makes sense to ask which languages contestants prefer to program in.
The table and chart below show the 5 most frequently used programming languages and the number of submissions in each of them.
```
po_jezikih = submissions.groupby("proglang").size()
popularni_jeziki = po_jezikih.sort_values()[-5:]
popularni_jeziki
popularni_jeziki.plot.bar()
```
We see that c++ is by far the most popular language among competitive programmers, while python is only in fifth place. This is not surprising, since c++ is considerably faster than python. Still, we expected more submissions in python than there turned out to be, because the first few tasks are usually very easy and are quicker to solve in python, where the code is typically shorter.
```
size = len(submissions[submissions.proglang.notnull()])
cpp_part = popularni_jeziki.loc["c++"] / size
python_part = popularni_jeziki.loc["python"] / size
print("Delež oddaj v c++:", round(cpp_part*100, 2), "%")
print("Delež oddaj v pythonu: ", round(python_part*100, 2), "%")
```
So python accounts for well under 5 % of all submissions, while c++ accounts for almost 87 %, i.e. more than 80 %.
## Solving speed
In this section we examine whether there is any correlation between how quickly someone solves tasks and how difficult the tasks they are able to solve are.
First, let us add another column to the contestants table that indicates how quickly a contestant solves tasks. For every contestant in every contest we take the time (in minutes) they needed to successfully solve one task. Usually everyone starts with the easy tasks (in order), and these do not require much knowledge of algorithms; one only has to quickly understand what the task asks for and implement the solution. We want to know whether this is related in any way to the ability to solve hard tasks.
```
contestants["min_time"] = pd.merge(contestants, submissions, left_on="id", right_on="contestant").groupby("contestant").min()["time"]
contestants
```
For each task we determine what fraction of the contestants solved it. The difficulty of a task is the fraction of contestants (among the recorded top 200) who did not solve it; for example, if 200 contestants attempted a task and 150 solved it, its difficulty is 0.25.
```
everyone = submissions.groupby("task").size()
solved = submissions[submissions.time.notnull()].groupby("task").size()
tasks["difficulty"] = 1 - solved / everyone
tasks
```
For every contestant in every contest we determine the difficulty of the hardest task they solved.
A contestant who spends too much time on the first task has no time left to attempt the harder ones anyway.
For such a contestant it is hard to say how difficult the tasks they can solve are, so we restrict ourselves to contestants who solve their first task in less than 60 minutes (contests usually last about 2 hours).
```
contestant_submission = pd.merge(contestants, submissions, left_on="id", right_on="contestant")
conestant_task = pd.merge(contestant_submission, tasks, left_on="task", right_on="id")
corelation = conestant_task.groupby("contestant").max()[["min_time", "difficulty"]]
corelation = corelation[corelation.min_time < 60]
corelation.plot.scatter("min_time", "difficulty")
corelation
```
On the left the points are clustered near the top, while on the right they are spread evenly along the vertical axis.
We can see that contestants who solve their first task faster are usually able to solve harder tasks than those who need more time for the first task.
Let us also quickly check what fraction of the contestants who placed in the top 200 solve their first task in less than 10 minutes.
```
len(contestants[contestants.min_time <= 10]) / len(contestants)
```
This fraction is almost 74 %.
## Does practice really make perfect?
We will look at how placements change as a contestant attends more contests. It seems plausible that contestants gain some experience from the contests they have already attended and are therefore more successful in the following ones.
For each user we determine how many contests they attended. We call this number the user's experience and store it in the *exp* column of the users table.
```
users["exp"] = contestants.groupby("user")["contest"].count()
users
```
Experience ranges between 1 and 200, since we only have data on 200 contests (in reality nobody attended more than 66 of them). We can therefore group users by how much experience they have and compute the average score for each group.
The table and plot below show how the score depends on experience.
```
score_by_exp = users.groupby("exp", as_index=False)[["exp", "score"]].mean()
score_by_exp.plot.scatter("exp", "score")
score_by_exp
```
We can see that the score increases with experience (apparently linearly), so attending contests is an effective way of learning.
Of course, Codeforces also has its own rating system, which is based on placements in past contests; users with a higher rating have a higher rank. It would be interesting to see how our scoring compares with it.
```
only_rated = users[users["rank"] != "Unrated"]
score_by_rank = only_rated[["rank", "score"]].groupby("rank", as_index=False).mean().sort_values("score", ascending=True)
score_by_rank.plot.bar(x="rank", y="score")
score_by_rank
```
We can see that the *Newbie* rank scores quite high. Perhaps some people create new accounts and compete with them, which makes the low ranks look very successful. The *Pupil* rank is also somewhat high and *Master* somewhat low. Apart from these few anomalies, the two scoring systems agree fairly well.
## Conclusion
We found that good programmers are spread fairly evenly around the world; naturally, there are more of them in countries with larger populations.
The vast majority of contestants prefer to program in c++, and very few in python.
Practice and contest participation are a very fast way to improve and learn programming. This holds even for those who are already very good: the data covers only the best contestants, yet a strong relationship between the number of contests attended and the results was still visible.
There is a relationship between how quickly a contestant solves tasks and how difficult the tasks they can solve are. This makes sense: the better the contestant, the less they have to think about the easy tasks. Moreover, as we noted, they probably already have a lot of programming experience behind them, so the easy tasks feel mechanical to them.
# Recurrent Neural Networks
:label:`sec_rnn`
In :numref:`sec_language_model` we introduced $n$-gram models,
in which the conditional probability of the word $x_t$ at time step $t$
depends only on the $n-1$ preceding words.
If we want to incorporate the possible effect of words earlier than time step $t-(n-1)$ on $x_t$,
we have to increase $n$; however, the number of model parameters then grows exponentially with $n$,
since for a vocabulary $\mathcal{V}$ we would need to store $|\mathcal{V}|^n$ numbers.
Hence, rather than modeling $P(x_t \mid x_{t-1}, \ldots, x_{t-n+1})$,
it is preferable to use a latent variable model:
$$P(x_t \mid x_{t-1}, \ldots, x_1) \approx P(x_t \mid h_{t-1}),$$
where $h_{t-1}$ is a *hidden state*,
also called a *hidden variable*,
which stores the sequence information up to time step $t-1$.
In general, the hidden state at time step $t$ can be computed from
the current input $x_{t}$ and the previous hidden state $h_{t-1}$:
$$h_t = f(x_{t}, h_{t-1}).$$
:eqlabel:`eq_ht_xt`
For the function $f$ in :eqref:`eq_ht_xt`, the latent variable model is not an approximation:
after all, $h_t$ could simply store all the data observed so far.
However, this could make both computation and storage expensive.
Recall the hidden layers with hidden units that we discussed in :numref:`chap_perceptrons`.
It is worth noting that hidden layers and hidden states are two very different concepts.
As explained, a hidden layer is a layer that is hidden from view on the path from input to output,
while a hidden state is, technically speaking, an *input* to whatever we do at a given step,
and it can only be computed from the data of previous time steps.
*Recurrent neural networks* (RNNs)
are neural networks with hidden states.
Before introducing the RNN model,
we first review the multilayer perceptron model introduced in :numref:`sec_mlp`.
## Neural Networks without Hidden States
Let us take a look at a multilayer perceptron with a single hidden layer.
Let the hidden layer's activation function be $\phi$.
Given a minibatch of examples $\mathbf{X} \in \mathbb{R}^{n \times d}$
with batch size $n$ and input dimension $d$,
the hidden layer output $\mathbf{H} \in \mathbb{R}^{n \times h}$ is computed as
$$\mathbf{H} = \phi(\mathbf{X} \mathbf{W}_{xh} + \mathbf{b}_h).$$
:eqlabel:`rnn_h_without_state`
In :eqref:`rnn_h_without_state`
we have the hidden-layer weight parameter $\mathbf{W}_{xh} \in \mathbb{R}^{d \times h}$,
the bias parameter $\mathbf{b}_h \in \mathbb{R}^{1 \times h}$,
and the number of hidden units $h$.
Broadcasting (see :numref:`subsec_broadcasting`) is applied during the summation.
Next, the hidden variable $\mathbf{H}$ is used as the input of the output layer.
The output layer is given by
$$\mathbf{O} = \mathbf{H} \mathbf{W}_{hq} + \mathbf{b}_q,$$
where $\mathbf{O} \in \mathbb{R}^{n \times q}$ is the output variable,
$\mathbf{W}_{hq} \in \mathbb{R}^{h \times q}$ is the weight parameter,
and $\mathbf{b}_q \in \mathbb{R}^{1 \times q}$ is the bias parameter of the output layer.
For a classification problem, we can use $\text{softmax}(\mathbf{O})$
to compute the probability distribution over the output categories.
This is entirely analogous to the regression problem we solved earlier in :numref:`sec_sequence`,
so we omit the details.
Suffice it to say that we can pick feature-label pairs at random
and learn the network parameters via automatic differentiation and stochastic gradient descent.
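The following minimal PyTorch sketch (an illustration added here, not part of the original text) makes the two formulas above concrete on random data, with tanh standing in for the activation $\phi$; the variable names mirror the notation of this section.
```
import torch

n, d, h, q = 2, 5, 4, 3                        # batch size, inputs, hidden units, outputs
X = torch.randn(n, d)
W_xh, b_h = torch.randn(d, h), torch.randn(1, h)
W_hq, b_q = torch.randn(h, q), torch.randn(1, q)

H = torch.tanh(X @ W_xh + b_h)                 # hidden layer output, shape (n, h)
O = H @ W_hq + b_q                             # output layer, shape (n, q)
print(H.shape, O.shape)
```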
## Recurrent Neural Networks with Hidden States
:label:`subsec_rnn_w_hidden_states`
Matters are entirely different when we have hidden states.
Assume that at time step $t$ we have a minibatch of inputs $\mathbf{X}_t \in \mathbb{R}^{n \times d}$.
In other words, for a minibatch of $n$ sequence examples,
each row of $\mathbf{X}_t$ corresponds to one example from the sequence at time step $t$.
Next, let $\mathbf{H}_t \in \mathbb{R}^{n \times h}$
denote the hidden variable of time step $t$.
Unlike the multilayer perceptron,
here we keep the hidden variable $\mathbf{H}_{t-1}$ from the previous time step
and introduce a new weight parameter $\mathbf{W}_{hh} \in \mathbb{R}^{h \times h}$
that describes how the previous time step's hidden variable is used in the current time step.
Specifically, the hidden variable of the current time step is computed from the input of the current time step
together with the hidden variable of the previous time step:
$$\mathbf{H}_t = \phi(\mathbf{X}_t \mathbf{W}_{xh} + \mathbf{H}_{t-1} \mathbf{W}_{hh} + \mathbf{b}_h).$$
:eqlabel:`rnn_h_with_state`
Compared with :eqref:`rnn_h_without_state`,
:eqref:`rnn_h_with_state` adds one more term
$\mathbf{H}_{t-1} \mathbf{W}_{hh}$,
thereby instantiating :eqref:`eq_ht_xt`.
From the relationship between the hidden variables $\mathbf{H}_t$ and
$\mathbf{H}_{t-1}$ of adjacent time steps
we know that these variables capture and retain the sequence's historical information up to the current time step,
just like the state or memory of the neural network at the current time step.
Therefore such a hidden variable is called a *hidden state*.
Since the hidden state uses the same definition in the current time step
as in the previous time step,
the computation in :eqref:`rnn_h_with_state` is *recurrent*.
Neural networks with hidden states based on this recurrent computation are therefore named
*recurrent neural networks*,
and layers that perform the computation of :eqref:`rnn_h_with_state`
are called *recurrent layers*.
There are many different ways of building recurrent neural networks;
RNNs with a hidden state defined by :eqref:`rnn_h_with_state` are a very common one.
For time step $t$, the output of the output layer is computed just as in the multilayer perceptron:
$$\mathbf{O}_t = \mathbf{H}_t \mathbf{W}_{hq} + \mathbf{b}_q.$$
The parameters of the recurrent neural network include the hidden-layer weights
$\mathbf{W}_{xh} \in \mathbb{R}^{d \times h}, \mathbf{W}_{hh} \in \mathbb{R}^{h \times h}$ and bias $\mathbf{b}_h \in \mathbb{R}^{1 \times h}$,
together with the output-layer weights $\mathbf{W}_{hq} \in \mathbb{R}^{h \times q}$
and bias $\mathbf{b}_q \in \mathbb{R}^{1 \times q}$.
It is worth mentioning that the recurrent neural network always uses these same model parameters, even at different time steps.
Therefore the parameterization cost of an RNN does not grow as the number of time steps increases.
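As an illustration added here (not part of the original text), a single step of :eqref:`rnn_h_with_state` and the reuse of the same parameters across time steps can be sketched in PyTorch as follows, again with tanh as $\phi$ and random weights:
```
import torch

n, d, h, q = 2, 5, 4, 3
W_xh, W_hh, b_h = torch.randn(d, h), torch.randn(h, h), torch.randn(1, h)
W_hq, b_q = torch.randn(h, q), torch.randn(1, q)

def rnn_step(X_t, H_prev):
    # the new hidden state depends on the current input and the previous hidden state
    H_t = torch.tanh(X_t @ W_xh + H_prev @ W_hh + b_h)
    O_t = H_t @ W_hq + b_q
    return H_t, O_t

H = torch.zeros(n, h)
for t in range(3):                             # unroll three time steps with the same parameters
    X_t = torch.randn(n, d)
    H, O = rnn_step(X_t, H)
print(H.shape, O.shape)
```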
:numref:`fig_rnn` illustrates the computational logic of a recurrent neural network at three adjacent time steps.
At any time step $t$, the computation of the hidden state can be viewed as:
1. concatenating the input $\mathbf{X}_t$ of the current time step $t$ with the hidden state $\mathbf{H}_{t-1}$ of the previous time step $t-1$;
1. feeding the concatenated result into a fully connected layer with activation function $\phi$.
The output of this fully connected layer is the hidden state $\mathbf{H}_t$ of the current time step $t$.
In this case, the model parameters are the concatenation of $\mathbf{W}_{xh}$ and $\mathbf{W}_{hh}$,
together with the bias $\mathbf{b}_h$, all taken from :eqref:`rnn_h_with_state`.
The hidden state $\mathbf{H}_t$ of the current time step $t$
will participate in computing the hidden state $\mathbf{H}_{t+1}$ of the next time step $t+1$.
Moreover, $\mathbf{H}_t$ is also fed into the fully connected output layer
to compute the output $\mathbf{O}_t$ of the current time step $t$.

:label:`fig_rnn`
We just mentioned that the computation of
$\mathbf{X}_t \mathbf{W}_{xh} + \mathbf{H}_{t-1} \mathbf{W}_{hh}$ for the hidden state
is equivalent to the matrix multiplication of the concatenation of $\mathbf{X}_t$ and $\mathbf{H}_{t-1}$
with the concatenation of $\mathbf{W}_{xh}$ and $\mathbf{W}_{hh}$.
Although this property can be proven mathematically,
below we simply use a small code snippet to demonstrate it.
First we define the matrices `X`, `W_xh`, `H`, and `W_hh`,
whose shapes are $(3,1)$, $(1,4)$, $(3,4)$, and $(4,4)$, respectively.
Multiplying `X` by `W_xh` and `H` by `W_hh`,
and then adding the two products, we obtain a matrix of shape $(3,4)$.
```
import torch
from d2l import torch as d2l
X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))
H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))
torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
```
Now we concatenate the matrices `X` and `H` along the columns (axis 1),
and the matrices `W_xh` and `W_hh` along the rows (axis 0).
These two concatenations yield matrices of shape $(3, 5)$ and $(5, 4)$, respectively.
Multiplying the two concatenated matrices,
we obtain an output matrix with the same shape $(3, 4)$ as above.
```
torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
```
## RNN-based Character-Level Language Models
Recall the language model of :numref:`sec_language_model`,
where our goal is to predict the next token based on the past and current tokens,
so we shift the original sequence by one token to obtain the labels.
Bengio et al. first proposed using a neural network for language modeling
:cite:`Bengio.Ducharme.Vincent.ea.2003`.
Next we look at how a recurrent neural network can be used to build a language model.
Let the minibatch size be 1 and the text sequence in the batch be "machine".
To simplify training in later sections, we use a
*character-level language model*,
tokenizing the text into characters rather than words.
:numref:`fig_rnn_train` demonstrates how,
with an RNN for character-level language modeling,
the next character is predicted from the current and previous characters.

:label:`fig_rnn_train`
During training, we apply a softmax operation to the output of the output layer at every time step,
and then use the cross-entropy loss to measure the error between the model output and the label.
Because of the recurrent computation of the hidden state in the hidden layer,
the output $\mathbf{O}_3$ of the third time step in :numref:`fig_rnn_train`
is determined by the text sequence "m", "a", and "c".
Since the next character of this sequence in the training data is "h",
the loss of the third time step will depend on the probability distribution of the next character
generated from the feature sequence "m", "a", "c" and the label "h" of this time step.
In practice we use a batch size of $n>1$,
and each token is represented by a $d$-dimensional vector.
Therefore the input $\mathbf X_t$ at time step $t$ is an $n\times d$ matrix,
which is identical to what we discussed in :numref:`subsec_rnn_w_hidden_states`.
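As a small added illustration (not part of the original text), the characters of "machine" can be turned into shifted input/label sequences and one-hot input vectors as follows; the toy vocabulary is built from this single word only:
```
import torch
import torch.nn.functional as F

text = "machine"
vocab = sorted(set(text))                      # toy vocabulary of 7 characters
char_to_idx = {c: i for i, c in enumerate(vocab)}

indices = torch.tensor([char_to_idx[c] for c in text])
inputs, labels = indices[:-1], indices[1:]     # inputs "machin", labels "achine"

# each input token becomes a d-dimensional one-hot vector (here d = 7)
X = F.one_hot(inputs, num_classes=len(vocab)).float()
print(X.shape)                                 # torch.Size([6, 7])
```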
## Perplexity
:label:`subsec_perplexity`
Finally, let us discuss how to measure the quality of a language model,
which we will use in later sections to evaluate our RNN-based models.
A good language model is able to predict, with highly accurate tokens, what we will see next.
Consider the following continuations of the phrase "It is raining ...", proposed by different language models:
1. "It is raining outside"
1. "It is raining banana tree"
1. "It is raining piouw;kcj pwepoiut"
In terms of quality, example 1 is clearly the most reasonable and logically coherent.
Although the model may not capture the semantics of the following words very precisely,
e.g., "It is raining in San Francisco"
or "It is raining in winter"
would be even better extensions,
the model is at least able to capture what kind of word follows.
Example 2 is considerably worse, since it produces a nonsensical continuation.
Nonetheless, the model has at least learned how to spell words
and some degree of correlation between words.
Finally, example 3 indicates an insufficiently trained model that does not fit the data properly.
We could measure the quality of the model by computing the likelihood of a sequence.
Unfortunately, this is a number that is hard to interpret and hard to compare.
After all, shorter sequences are much more likely to occur than longer ones,
so evaluating a model on Tolstoy's monumental *War and Peace*
will inevitably produce a much smaller likelihood than on Saint-Exupéry's novella *The Little Prince*.
What is missing is the equivalent of an average.
Information theory comes in handy here.
We defined entropy, surprisal, and cross-entropy when introducing softmax regression
(:numref:`subsec_info_theory_basics`),
and more information theory is discussed in the [online appendix on information theory](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/information-theory.html).
If we want to compress text, we can ask about predicting the next token given the current set of tokens.
A better language model should allow us to predict the next token more accurately,
and thus to spend fewer bits when compressing the sequence.
So we can measure the model by the cross-entropy loss averaged over all of the $n$ tokens of a sequence:
$$\frac{1}{n} \sum_{t=1}^n -\log P(x_t \mid x_{t-1}, \ldots, x_1),$$
:eqlabel:`eq_avg_ce_for_lm`
where $P$ is given by the language model and
$x_t$ is the actual token observed at time step $t$ of the sequence.
This makes the performance on documents of different lengths comparable.
For historical reasons, scientists in natural language processing prefer a quantity called *perplexity*.
In a nutshell, it is the exponential of :eqref:`eq_avg_ce_for_lm`:
$$\exp\left(-\frac{1}{n} \sum_{t=1}^n \log P(x_t \mid x_{t-1}, \ldots, x_1)\right).$$
Perplexity is best understood as the harmonic mean of the number of real choices for the next token.
Let us look at a few cases:
* In the best case, the model always perfectly estimates the probability of the label token as 1.
  In this case the perplexity of the model is 1.
* In the worst case, the model always predicts the probability of the label token as 0.
  In this case the perplexity is positive infinity.
* At the baseline, the model predicts a uniform distribution over all available tokens of the vocabulary.
  In this case the perplexity equals the number of unique tokens in the vocabulary.
  In fact, if we were to store the sequence without any compression,
  this would be the best encoding we could achieve.
  Hence, this provides an important upper bound,
  which any practical model must beat.
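As a tiny numerical illustration added here (not in the original text), perplexity is just the exponential of the average negative log-likelihood, so it can be computed directly from the probabilities a model assigns to the actual next tokens:
```
import math

# hypothetical probabilities assigned by a model to the actual next tokens
probs = [0.5, 0.25, 0.125, 0.5]

avg_nll = -sum(math.log(p) for p in probs) / len(probs)
perplexity = math.exp(avg_nll)
print(perplexity)   # about 3.36; a uniform guess over 4 tokens would give exactly 4.0
```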
In the following sections we will implement character-level language models based on recurrent neural networks
and use perplexity to evaluate such models.
## Summary
* A neural network that uses recurrent computation for hidden states is called a recurrent neural network (RNN).
* The hidden state of an RNN can capture the historical information of the sequence up to the current time step.
* The number of parameters of an RNN model does not grow as the number of time steps increases.
* We can create character-level language models with an RNN.
* We can use perplexity to evaluate the quality of language models.
## Exercises
1. If we use an RNN to predict the next character in a text sequence, what is the required dimension of any output?
1. Why can an RNN express the conditional probability of a token at some time step based on all the previous tokens in the text sequence?
1. What happens to the gradient if you backpropagate through a long sequence?
1. What are some of the problems associated with the language model described in this section?
[Discussions](https://discuss.d2l.ai/t/2100)
# Decentralized Voting Machine
For years, the powerful and the rich have influenced, and sometimes outright rigged, elections. <br> By using a decentralized application instead of a generic client-server architecture, our project aims at a genuine choice of leadership. <br> This type of arrangement makes it almost impossible for anyone to tamper with the vote or hack it. It is with a great leader that a nation can prosper.
## Problem Statement:
Elections conducted using conventional methods have a threefold problem
## 1. Prone to tampering/hacking
<img src="files/images/hack.jpg">
Even India has become victim to such attacks
<img src="files/images/hack2.PNG">
## 2. Wastage of resources & money
<img src="files/images/hack3.PNG">
## 3. Cost of Public Holiday & Convenience
<img src="http://nation.lk/online/wp-content/uploads/2015/07/Public-Holidays-Around-the-World_IPS.jpg">
## So how can we solve it?
## We are going to combine two powerful technologies - Blockchain + ML
## Module 1 - UIDAI Validation
Every user has to log in using their unique Aadhar ID and an OTP. From this, details such as "Age", "Gender", "State" and "City" can be parsed.
## Module 2 - Voting Smart Contract
It further has four steps -
Step 1 - Setting up Environment <br>
Step 2 - Creating Voting Smart Contract <br>
Step 3 - Interacting with the Contract via the Nodejs Console <br>
Step 4 - Creating GUI interface <br>

## Step 1 - Setting up Environment
``npm install ethereumjs-testrpc web3``
testrpc creates 10 test accounts to play with automatically. These accounts come preloaded with 100 (fake) ethers.
## Step 2 - Creating Voting Smart Contract
- Ethereum's solidity programming language is used to write our contract
- Our contract (think of contract as a class) is called Voting with a constructor which initializes an array of candidates
- It exposes 2 functions: one to return the total votes a candidate has received and another to increment the vote count for a candidate.
- Deployed contracts are immutable. If any changes are needed, we simply deploy a new one.
Install the dependencies on node console
``npm install solc``
After writing our smart contract, we'll use Web3js to deploy our app and interact with it
```
naman:~/DVM$ node
> Web3 = require('web3')
> web3 = new Web3(new Web3.providers.HttpProvider("http://localhost:8545"));
```
Then ensure Web3js is initialized and can query all accounts on the blockchain
```
> web3.eth.accounts
```
Lastly, compile the contract by loading the code from Voting.sol into a string variable and passing it to the solc compiler
```
> code = fs.readFileSync('Voting.sol').toString()
> solc = require('solc')
> compiledCode = solc.compile(code)
```
Deploy the contract!
- compiledCode.contracts[‘:Voting’].bytecode: bytecode which will be deployed to the blockchain.
- compiledCode.contracts[‘:Voting’].interface: interface of the contract (called abi) which tells the contract user what methods are available in the contract.
```
> abiDefinition = JSON.parse(compiledCode.contracts[':Voting'].interface)
> VotingContract = web3.eth.contract(abiDefinition)
> byteCode = compiledCode.contracts[':Voting'].bytecode
> deployedContract = VotingContract.new(['Rahul Gandhi','Narendra Modi','Nitish Kumar'],{data: byteCode, from: web3.eth.accounts[0], gas: 4700000})
> deployedContract.address
> contractInstance = VotingContract.at(deployedContract.address)
```
- deployedContract.address: when you have to interact with your contract, you need this deployed address and the abi definition we talked about earlier.
## Step 3 - Interacting with the Contract via the Nodejs Console
```
> contractInstance.totalVotesFor.call('Rahul Gandhi')
{ [String: '0'] s: 1, e: 0, c: [ 0 ] }
> contractInstance.voteForCandidate('Rahul Gandhi', {from: web3.eth.accounts[0]})
'0xdedc7ae544c3dde74ab5a0b07422c5a51b5240603d31074f5b75c0ebc786bf53'
> contractInstance.voteForCandidate('Rahul Gandhi', {from: web3.eth.accounts[0]})
'0x02c054d238038d68b65d55770fabfca592a5cf6590229ab91bbe7cd72da46de9'
> contractInstance.voteForCandidate('Rahul Gandhi', {from: web3.eth.accounts[0]})
'0x3da069a09577514f2baaa11bc3015a16edf26aad28dffbcd126bde2e71f2b76f'
> contractInstance.totalVotesFor.call('Rahul Gandhi').toLocaleString()
'3'
```
## Step 4 - Creating GUI interface
An HTML + JS client is used for this purpose
## Module 3 - Dashboard
## Module 4 - Chatbot for Candidate enquiry
## Module 5 - Using ML to extract people's opinion
## Generic Decentralized app looks like this -

## So what's next?
1. Adding OCR to automate parsing
2. Scaling
3. Facilitation of Personal AI for specially abled people
4. Increasing Robustness of Dashboard
## Conclusion
We may not be a billion-dollar product, but our product will impact a billion lives
<img align="right" width="250" src="http://www.sobigdata.eu/sites/default/files/logo-SoBigData-DEFINITIVO.png">
**Author:** [Riccardo Guidotti](http://kdd.isti.cnr.it/people/riccardo-guidotti)
**Python version:** 3.x
<img align="right" width="250" src="https://nullpointerexception1.files.wordpress.com/2017/11/decision-tree-e1513448957591.jpg?w=1400&h=9999">
# Classification with Python
This notebook contains an overview of basic python functionalities for classification using the [sklearn](http://scikit-learn.org/stable/) library.
Note: this notebook is purposely not 100% comprehensive, it only discusses the basic things you need to get started.
Import of the basic packages to use
```
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
```
<img align="right" width="150" src="https://archive.ics.uci.edu/ml/assets/MLimages/Large53.jpg">
## Iris Dataset
[Link](https://archive.ics.uci.edu/ml/datasets/iris) to the dataset on the UCI Machine Learning Repository.
As a first step we load the Iris dataset and get familiar with its features.
```
df = pd.read_csv("../dataset/iris.csv", skipinitialspace=True, sep=',')
df.head()
df.info()
```
The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant.
The predictive attribute is the class of the iris plant.
Features:
* sepal length (in cm)
* sepal width (in cm)
* petal length (in cm)
* petal width (in cm)
* class: Iris-setosa, Iris-versicolour, Iris-virginica
Since classification is a ***supervised*** task, we are interested in knowing the distribution of the target class.
```
df['class'].value_counts()
```
Sometimes it is useful to map a set of strings into a set of integers.
```
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
num_classes = le.fit_transform(df['class'])
print(num_classes[:5])
```
## Data Understanding
We observe the distributions of the attributes without considering the class.
```
from pandas.plotting import scatter_matrix
scatter_matrix(df, figsize=(10, 10), c=num_classes, s=50)
plt.show()
plt.scatter(df['sepal length'], df['petal width'], s=20, c=num_classes)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.show()
```
## Classification Objective
Given a collection of records called the ***training set***, where each record contains a set of ***attributes*** and one of the attributes is the ***target class***, the objective of classification is to find a model for the class attribute as a function of the values of the other attributes.
The ***goal*** is to assign to a class previously unseen records as accurately as possible.
A ***test set*** is used to determine the accuracy of the model.
Usually, the given data set is divided into training and test sets, with training set used to build
the model and test set used to validate it.
<img align="center" width="650" src="http://images.slideplayer.com/15/4732696/slides/slide_4.jpg">
## Classification Techniques
* ***Decision Tree***
* ***Instance-based methods***
* Rule-based methods
* Neural Networks
* Naïve Bayes and Bayesian Belief Networks
* Support Vector Machines (SVM)
## Evaluating the Performance of a Classifier
In order to evaluate the quality of classification there exist several measures: all of them built upon the concept of **Confusion Matrix**.
**Confusion Matrix**
In the field of machine learning a confusion matrix is a specific table layout that allows visualization of the performance of an algorithm. Each row of the matrix represents the instances in a predicted class while each column represents the instances in an actual class (or vice versa).
<img align="right" width="300" src="https://rasbt.github.io/mlxtend/user_guide/evaluate/confusion_matrix_files/confusion_matrix_1.png">
Given a Target class:
* ***True Positive (TP)*** represent those instances correctly predicted to be True
* ***False Positive (FP)*** represent those instances incorrectly predicted to be True
* ***True Negative (TN)*** represent those instances correctly predicted to be False
* ***False Negative (FN)*** represent those instances incorrectly predicted to be False
Upon such classes are built several indicators.
Among the others, two scores characterize the outcome of a predictive model: ***precision*** and ***recall***
* **Precision**: how many of the instances I predict to be True are really True? $\mathit{precision} = \frac{TP}{TP+FP}$
* **Recall**: how many True instances I was able to correctly predict? $\mathit{recall} = \frac{TP}{TP+FN}$
To summarize the overall performance of a model we can also use the ***accuracy*** and the ***f1-score***:
* The **accuracy** $= \frac{TP+TN}{TP+TN+FP+FN}$ captures the number of instances correctly classified above all
* $1-\mathit{accuracy}$ gives the error rate, i.e., the error committed by the classifier.
* The **f1-score** $= \frac{2TP}{2TP+FP+FN}$ is the harmonic mean of precision and recall.
All these indicators are provided by [sklearn](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics).
```
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, classification_report
from sklearn.metrics import roc_curve, auc, roc_auc_score
```
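As a quick, self-contained illustration added here (the labels are made up), these functions can be applied to any pair of true and predicted label vectors:
```
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, classification_report

# toy labels, for illustration only
y_true = ['pos', 'pos', 'neg', 'neg', 'pos', 'neg']
y_pred = ['pos', 'neg', 'neg', 'neg', 'pos', 'pos']

print(confusion_matrix(y_true, y_pred))           # rows: true classes, columns: predicted classes
print('Accuracy %s' % accuracy_score(y_true, y_pred))
print('F1-score %s' % f1_score(y_true, y_pred, pos_label='pos'))
print(classification_report(y_true, y_pred))
```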
# Decision Tree
## Example of Decision Tree and Application
<img align="left" width="490" src="http://images.slideplayer.com/15/4732696/slides/slide_10.jpg">
<img align="right" width="490" src="http://images.slideplayer.com/15/4732696/slides/slide_13.jpg">
## The Algorithm in a Nutshell
**Objective:** Build the most accurate decision tree.
Given a set $D$ of training records:
* If $D_x$ contains records that all belong to the same class $y$, then this is a leaf node labeled as $y$;
* If $D_x$ contains records that belong to more than one class, use the **best attribute** to split the data into smaller subsets $D_1, \dots D_k$.
* Recursively apply the procedure to each subset.
How to determine the best split: nodes with a ***homogeneous*** class distribution are preferred.
Thus, a measure of node ***impurity*** is required (a small numerical example follows the lists below). Examples of impurity measures:
* Gini Index
* Entropy
* Misclassification error
How to determine when to stop splitting: there are various ***stopping criteria***:
* Stop expanding a node when all the records belong to the same class
* Stop expanding a node when all the records have similar attribute values
* Early termination (to be discussed later)
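To make the impurity measures listed above concrete, here is a small illustrative computation (added here, not part of the original notebook) for a node with a given class distribution:
```
import numpy as np

# hypothetical class proportions at a node, for illustration only
p = np.array([0.7, 0.2, 0.1])

gini = 1 - np.sum(p**2)                       # 0 for a pure node, higher = more impure
entropy = -np.sum(p * np.log2(p))             # 0 for a pure node
misclassification = 1 - np.max(p)             # error made by predicting the majority class
print(gini, entropy, misclassification)       # 0.46, ~1.16, 0.3
```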
> Tan, P. N. (2006). Introduction to data mining. Pearson Education India.
Running [example](http://matlaspisa.isti.cnr.it:5055/Decision%20Tree)
Wikipedia [link](https://en.wikipedia.org/wiki/Decision_tree)
## Classification Problems
* Missing values: sophisticated techniques are required to handle missing values
    * The sklearn library does not allow missing values.
* Overfitting: the model is very accurate on the training data but its performance is poor on the test data.
    * For a Decision Tree this means that the tree is deeper and more complex than necessary.
## Decision Tree in Python ([sklearn](http://scikit-learn.org/stable/modules/tree.html))
```
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
```
Split the dataset into train and test
```
attributes = [col for col in df.columns if col != 'class']
X = df[attributes].values
y = df['class']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=100, stratify=y)
```
Build the decision tree
Parameters:
* **criterion** (default 'gini'): The function to measure the quality of a split. Available: gini, entropy.
* **max_depth** (default None): The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
* **min_samples_split** (default 2): The minimum number of samples required to split an internal node.
* **min_samples_leaf** (default 1): The minimum number of samples required to be at a leaf node.
```
clf = DecisionTreeClassifier(criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1)
clf = clf.fit(X_train, y_train)
```
Output:
* **feature\_importances_**: The feature importances. The higher, the more important the feature.
* **tree_**: The underlying Tree object.
Feature importances
```
for col, imp in zip(attributes, clf.feature_importances_):
print(col, imp)
```
Visualize the decision tree
```
import pydotplus
from sklearn import tree
from IPython.display import Image
dot_data = tree.export_graphviz(clf, out_file=None, feature_names=attributes, class_names=clf.classes_,
filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
```
Apply the decision tree on the training set
```
y_pred = clf.predict(X_train)
```
Evaluate the performance
```
print('Accuracy %s' % accuracy_score(y_train, y_pred))
print('F1-score %s' % f1_score(y_train, y_pred, average=None))
print(classification_report(y_train, y_pred))
confusion_matrix(y_train, y_pred)
```
Apply the decision tree on the test set and evaluate the performance
```
y_pred = clf.predict(X_test)
print('Accuracy %s' % accuracy_score(y_test, y_pred))
print('F1-score %s' % f1_score(y_test, y_pred, average=None))
print(classification_report(y_test, y_pred))
confusion_matrix(y_test, y_pred)
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
lb.fit(y_test)
lb.classes_.tolist()
fpr = dict()
tpr = dict()
roc_auc = dict()
by_test = lb.transform(y_test)
by_pred = lb.transform(y_pred)
for i in range(3):
fpr[i], tpr[i], _ = roc_curve(by_test[:, i], by_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
roc_auc = roc_auc_score(by_test, by_pred, average=None)
roc_auc
plt.figure(figsize=(8, 5))
for i in range(3):
plt.plot(fpr[i], tpr[i],
label='%s ROC curve (area = %0.2f)' % (lb.classes_.tolist()[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=22)
plt.legend(loc="lower right", fontsize=14, frameon=False)
plt.show()
```
### Cross Validation
More options at [link](http://scikit-learn.org/stable/modules/cross_validation.html#)
```
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, X, y, cv=10)
print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, X, y, cv=10, scoring='f1_macro')
print('F1-score: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
```
### Tuning the hyper-parameters
More options at [link](http://scikit-learn.org/stable/modules/grid_search.html#grid-search)
```
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
param_list = {'min_samples_split': [2, 5, 10, 20],
'min_samples_leaf': [1, 5, 10, 20],
}
grid_search = GridSearchCV(clf, param_grid=param_list)
grid_search.fit(X, y)
clf = grid_search.best_estimator_
report(grid_search.cv_results_, n_top=3)
param_list = {'max_depth': [None] + list(np.arange(2, 20)),
'min_samples_split': [2, 5, 10, 20, 30, 50, 100],
'min_samples_leaf': [1, 5, 10, 20, 30, 50, 100],
}
random_search = RandomizedSearchCV(clf, param_distributions=param_list, n_iter=100)
random_search.fit(X, y)
clf = random_search.best_estimator_
report(random_search.cv_results_, n_top=3)
```
## Any other Sklearn classifier can be used in the same way
Let see two examples: Random Forest and K-Nearest Neighbor
# Random Forest
Sklearn [link](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) for more details.
```
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None,
min_samples_split=2, min_samples_leaf=1, class_weight=None)
scores = cross_val_score(clf, X, y, cv=10)
print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, X, y, cv=10, scoring='f1_macro')
print('F1-score: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
```
# K-Nearest Neighbors
Sklearn [link](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html) for more details.
```
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=5)
scores = cross_val_score(clf, X, y, cv=10)
print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, X, y, cv=10, scoring='f1_macro')
print('F1-score: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std() * 2))
```
This notebook generates forcing files for the 2D domain.
Plan: use the 3D boundary files, average across the mouth of Juan de Fuca, and set the result uniformly across the domain.
```
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import os
from salishsea_tools import tidetools,nc_tools
%matplotlib inline
```
# Load 3D T+S
```
f = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/open_boundaries/west/SalishSea2_Masson_DC.nc')
T = f.variables['votemper'][:]
S = f.variables['vosaline'][:]
print(S.shape)
depth = f.variables['deptht'][:]
times = f.variables['time_counter'][:]
nc_tools.show_dimensions(f)
```
There are 52 weeks, 40 depth levels, 87 points across the mouth, and 10 points into the domain.
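A quick shape check, assuming (as the slicing below does) that the rim copies are stacked along the last axis:
```
ntime_in, ndepth_in, ny_in, nx_in = T.shape
print(ntime_in, ndepth_in, ny_in, nx_in)  # expect 52 weeks, 40 depths, and 87*10 = 870 points on the last axis
assert nx_in == 87 * 10
```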
# Average across mouth
```
ntime=52; ndepth=40; nrim = 10
width_3d=87
Tmean = np.zeros((ntime,ndepth,nrim))
Smean = np.zeros((ntime,ndepth,nrim))
for i in np.arange(nrim):
ind = i*width_3d
Tmean[:,:,i] = np.nanmean(T[:,:,0,ind:ind+width_3d], axis=2)
Smean[:,:,i] = np.nanmean(S[:,:,0,ind:ind+width_3d], axis=2)
plt.pcolormesh(Smean[:,:,0].T)
plt.axis([0,ntime,ndepth,0])
plt.colorbar()
plt.pcolormesh(Tmean[:,:,0].T)
plt.axis([0,ntime,ndepth,0])
plt.colorbar()
plt.pcolormesh(Smean[0,:,:])
plt.axis([0,nrim,ndepth,0])
plt.colorbar()
plt.pcolormesh(Tmean[0,:,:])
plt.axis([0,nrim,ndepth,0])
plt.colorbar()
```
Looks reasonable.
# Copy across Y
```
Ny=8 #only 8 because of masked edges
T_y=np.tile(Tmean,Ny)
T_y=T_y.reshape(ntime,ndepth,Ny,nrim)
T_y.shape
plt.pcolormesh(T_y[0,:,:,0])
plt.axis([0,Ny,ndepth,0])
plt.colorbar()
S_y=np.tile(Smean,Ny)
S_y=S_y.reshape(ntime,ndepth,Ny,nrim)
S_y.shape
plt.pcolormesh(S_y[0,:,:,3])
plt.axis([0,Ny,ndepth,0])
plt.colorbar()
print(S_y.min())
```
# Untile
Need to order the data from closest to the edge to furthest from the edge
```
T_untile = np.zeros((ntime,ndepth,1,Ny*nrim))
S_untile = np.zeros((ntime,ndepth,1,Ny*nrim))
for i in np.arange(nrim):
ind = i*Ny
T_untile[:,:,0,ind:ind+Ny] = T_y[:,:,:,i]
S_untile[:,:,0,ind:ind+Ny] = S_y[:,:,:,i]
plt.pcolormesh(T_untile[0,:,0,:])
plt.axis([0,Ny*nrim,ndepth,0])
plt.colorbar()
plt.pcolormesh(S_untile[0,:,0,:])
plt.axis([0,Ny*nrim,ndepth,0])
plt.colorbar()
```
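The same reordering can also be done in one step with a transpose and a reshape; a sketch that should reproduce the arrays built above (worth confirming with `np.allclose` before relying on it):
```
T_untile_fast = np.transpose(T_y, (0, 1, 3, 2)).reshape(ntime, ndepth, 1, nrim * Ny)
S_untile_fast = np.transpose(S_y, (0, 1, 3, 2)).reshape(ntime, ndepth, 1, nrim * Ny)
print(np.allclose(T_untile_fast, T_untile), np.allclose(S_untile_fast, S_untile))  # expect True True
```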
# Save to netCDF
```
nemo = nc.Dataset('../boundary_conditions/TS_OBC.nc', 'w', zlib=True)
# start and end points
length_rim = nrim
lengthi = Ny * length_rim
# time and depth
depth_levels = ndepth
# dataset attributes
nc_tools.init_dataset_attrs(
nemo,
    title='Temperature and Salinity Boundary Conditions 2D domain',
notebook_name='Generate T+S Forcing - NEMO3.6',
nc_filepath='/data/nsoontie/MEOPAR/2Ddomain/TS_OBC.nc',
comment='based on average values across mouth of JdF and 3D weekly climatology')
# dimensions
nemo.createDimension('xb', lengthi)
nemo.createDimension('yb', 1)
nemo.createDimension('time_counter', None)
nemo.createDimension('deptht', depth_levels)
# variables
# deptht
deptht = nemo.createVariable('deptht', 'float32', ('deptht',))
deptht.long_name = 'Vertical T Levels'
deptht.units = 'm'
deptht.positive = 'down'
deptht.valid_range = np.array((4., 428.))
deptht[:]=depth
# time_counter
time_counter = nemo.createVariable('time_counter', 'float32', ('time_counter'))
time_counter.long_name = 'Time axis'
time_counter.axis = 'T'
time_counter.units = 'weeks since beginning of year'
time_counter[:]=times
# votemper
votemper = nemo.createVariable('votemper', 'float32',
('time_counter','deptht','yb','xb'))
votemper.units = 'degC'
votemper.long_name = 'Temperature'
votemper.grid = 'SalishSea2D'
votemper[:]=T_untile
# vosaline
vosaline = nemo.createVariable('vosaline', 'float32',
('time_counter','deptht','yb','xb'))
vosaline.units = 1
vosaline.long_name = 'Practical Salinity'
vosaline.grid = 'SalishSea2D'
vosaline[:]=S_untile
# nbidta, nbjdta, nbrdta
nbidta = nemo.createVariable('nbidta', 'int32' , ('yb','xb'))
nbidta.long_name = 'i grid position'
nbidta.units = 1
nbjdta = nemo.createVariable('nbjdta', 'int32' , ('yb','xb'))
nbjdta.long_name = 'j grid position'
nbjdta.units = 1
nbrdta = nemo.createVariable('nbrdta', 'int32' , ('yb','xb'))
nbrdta.long_name = 'position from boundary'
nbrdta.units = 1
for ir in range(length_rim):
nbidta[0,ir*Ny:(ir+1)*Ny] = ir
nbjdta[0,ir*Ny:(ir+1)*Ny] = range(Ny)
nbrdta[0,ir*Ny:(ir+1)*Ny] = ir
nemo.close()
```
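A quick read-back check of the file that was just written (a sketch; adjust the path if needed):
```
check = nc.Dataset('../boundary_conditions/TS_OBC.nc')
nc_tools.show_dimensions(check)
print(check.variables['votemper'].shape)  # expect (52, 40, 1, 80)
check.close()
```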
# Random Forest Example
Implement the Random Forest algorithm with TensorFlow and apply it to classify
handwritten digit images. This example uses the MNIST database of
handwritten digits as training samples (http://yann.lecun.com/exdb/mnist/).
- Author: Aymeric Damien
- Project: https://github.com/aymericdamien/TensorFlow-Examples/
These lessons are adapted from [aymericdamien's TensorFlow tutorials](https://github.com/aymericdamien/TensorFlow-Examples) ([GitHub](https://github.com/aymericdamien/TensorFlow-Examples)), which are published under the [MIT License](https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/LICENSE), allowing very broad use for both academic and commercial purposes.
```
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest
# Ignore all GPUs, tf random forest does not benefit from it.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Parameters
num_steps = 500 # Total steps to train
batch_size = 1024 # The number of samples per batch
num_classes = 10 # The 10 digits
num_features = 784 # Each image is 28x28 pixels
num_trees = 10
max_nodes = 1000
# Input and Target data
X = tf.placeholder(tf.float32, shape=[None, num_features])
# For random forest, labels must be integers (the class id)
Y = tf.placeholder(tf.int32, shape=[None])
# Random Forest Parameters
hparams = tensor_forest.ForestHParams(num_classes=num_classes,
num_features=num_features,
num_trees=num_trees,
max_nodes=max_nodes).fill()
# Build the Random Forest
forest_graph = tensor_forest.RandomForestGraphs(hparams)
# Get training graph and loss
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)
# Measure the accuracy
infer_op = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize the variables (i.e. assign their default value)
init_vars = tf.global_variables_initializer()
# Start TensorFlow session
sess = tf.Session()
# Run the initializer
sess.run(init_vars)
# Training
for i in range(1, num_steps + 1):
# Prepare Data
    # Get the next batch of MNIST data (both images and integer labels are used to train the forest)
batch_x, batch_y = mnist.train.next_batch(batch_size)
_, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
if i % 50 == 0 or i == 1:
acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))
# Test Model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
```
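Note that `tensorflow.contrib.tensor_forest` only exists in TensorFlow 1.x (`tf.contrib` was removed in TensorFlow 2.x). For comparison, a rough scikit-learn sketch of the same experiment, reusing the `mnist` object loaded above (not part of the original example):
```
from sklearn.ensemble import RandomForestClassifier

skl_forest = RandomForestClassifier(n_estimators=10, random_state=0)
skl_forest.fit(mnist.train.images, mnist.train.labels)
print("Test Accuracy:", skl_forest.score(mnist.test.images, mnist.test.labels))
```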
# Time Series and Machine Learning Primer
```
# PLOTTING A TIME SERIES 1
# Print the first 5 rows of data
print(data.head())
# Print the first 5 rows of data2
print(data2.head())
# Plot the time series in each dataset
fig, axs = plt.subplots(2, 1, figsize=(5, 10))
data.iloc[:1000].plot(y="data_values", ax=axs[0])
data2.iloc[:1000].plot(y="data_values", ax=axs[1])
plt.show()
# PLOTTING A TIME SERIES 2
# Plot the time series in each dataset
fig, axs = plt.subplots(2, 1, figsize=(5, 10))
data.iloc[:1000].plot(x='time', y='data_values', ax=axs[0])
data2.iloc[:1000].plot(x='time', y='data_values', ax=axs[1])
plt.show()
# FITTING A SIMPLE MODEL
# Print the first 5 rows for inspection
print(data.head())
from sklearn.svm import LinearSVC
# Construct data for the model
X = data[['petal length (cm)', 'petal width (cm)']]
y = data[['target']]
# Fit the model
model = LinearSVC()
model.fit(X, y)
# PREDICTING USING A CLASSIFICATION MODEL
# Create input array
X_predict = targets[['petal length (cm)', 'petal width (cm)']]
# Predict with the model
predictions = model.predict(X_predict)
print(predictions)
# Visualize predictions and actual values
plt.scatter(X_predict['petal length (cm)'], X_predict['petal width (cm)'],
c=predictions, cmap=plt.cm.coolwarm)
plt.title("Predicted class values")
plt.show()
# FITTING A SIMPLE MODEL REGRESSION
from sklearn import linear_model
# Prepare input and output DataFrames
X = boston[['AGE']]
y = boston[['RM']]
# Fit the model
model = linear_model.LinearRegression()
model.fit(X, y)
# PREDICTING USING REGRESSION and RESHAPE
# Generate predictions with the model using those inputs
predictions = model.predict(new_inputs.reshape([-1, 1]))
# Visualize the inputs and predicted values
plt.scatter(new_inputs, predictions, color='r', s=3)
plt.xlabel('inputs')
plt.ylabel('predictions')
plt.show()
# INSPECTING AUDIO DATA
import librosa as lr
from glob import glob
# List all the wav files in the folder
audio_files = glob(data_dir + '/*.wav')
# Read in the first audio file, create the time array
audio, sfreq = lr.load(audio_files[0])
time = np.arange(0, len(audio)) / sfreq
# Plot audio over time
fig, ax = plt.subplots()
ax.plot(time, audio)
ax.set(xlabel='Time (s)', ylabel='Sound Amplitude')
plt.show()
# INSPECTING REGRESSION DATA
# Read in the data
data = pd.read_csv('prices.csv', index_col=0)
# Convert the index of the DataFrame to datetime
data.index = pd.to_datetime(data.index)
print(data.head())
# Loop through each column, plot its values over time
fig, ax = plt.subplots()
for column in data:
data[column].plot(ax=ax, label=column)
ax.legend()
plt.show()
```
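The `reshape([-1, 1])` in the regression prediction above is needed because scikit-learn estimators expect a 2-D feature array (samples in rows, features in columns); a minimal illustration:
```
import numpy as np

new_inputs_demo = np.arange(0, 100)              # hypothetical 1-D inputs
print(new_inputs_demo.shape)                     # (100,)
print(new_inputs_demo.reshape([-1, 1]).shape)    # (100, 1): one sample per row, one feature column
```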
# Time Series as Inputs to a Model (Classification)
```
# AVERAGING ACROSS TIMESERIES (abnormal vs normal heartbeats)
fig, axs = plt.subplots(3, 2, figsize=(15, 7), sharex=True, sharey=True)
# Calculate the time array
time = np.arange(normal.shape[0]) / sfreq
# Stack the normal/abnormal audio so you can loop and plot
stacked_audio = np.hstack([normal, abnormal]).T
# Loop through each audio file / ax object and plot
# .T.ravel() transposes the array, then unravels it into a 1-D vector for looping
for iaudio, ax in zip(stacked_audio, axs.T.ravel()):
ax.plot(time, iaudio)
show_plot_and_make_titles()
# AVERAGING ACROSS TIMESERIES (abnormal vs normal heartbeats)
# Average across the audio files of each DataFrame
mean_normal = np.mean(normal, axis=1)
mean_abnormal = np.mean(abnormal, axis=1)
# Plot each average over time
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3), sharey=True)
ax1.plot(time, mean_normal)
ax1.set(title="Normal Data")
ax2.plot(time, mean_abnormal)
ax2.set(title="Abnormal Data")
plt.show()
# BUILD A CLASSIFICATION MODEL
from sklearn.svm import LinearSVC
# Initialize and fit the model
model = LinearSVC()
model.fit(X_train, y_train)
# Generate predictions and score them manually
predictions = model.predict(X_test)
print(sum(predictions == y_test.squeeze()) / len(y_test))
# CALCULATING THE ENVELOPE OF SOUND
# Plot the raw data first
audio.plot(figsize=(10, 5))
plt.show()
# Rectify the audio signal
audio_rectified = audio.apply(np.abs)
# Plot the result
audio_rectified.plot(figsize=(10, 5))
plt.show()
# Smooth by applying a rolling mean
audio_rectified_smooth = audio_rectified.rolling(50).mean()
# Plot the result
audio_rectified_smooth.plot(figsize=(10, 5))
plt.show()
# CALCULATING FEATURES FROM THE ENVELOPE
# Calculate stats
means = np.mean(audio_rectified_smooth, axis=0)
stds = np.std(audio_rectified_smooth, axis=0)
maxs = np.max(audio_rectified_smooth, axis=0)
# Create the X and y arrays
X = np.column_stack([means, stds, maxs])
y = labels.reshape([-1, 1])
# Fit the model and score on testing data
from sklearn.model_selection import cross_val_score
percent_score = cross_val_score(model, X, y, cv=5)
print(np.mean(percent_score))
# DERIVATIVE FEATURES: THE TEMPOGRAM
# Calculate the tempo of the sounds
tempos = []
for col, i_audio in audio.items():
tempos.append(lr.beat.tempo(i_audio.values, sr=sfreq, hop_length=2**6, aggregate=None))
# Convert the list to an array so you can manipulate it more easily
tempos = np.array(tempos)
# Calculate statistics of each tempo
tempos_mean = tempos.mean(axis=-1)
tempos_std = tempos.std(axis=-1)
tempos_max = tempos.max(axis=-1)
# Create the X and y arrays
X = np.column_stack([means, stds, maxs, tempos_mean, tempos_std, tempos_max])
y = labels.reshape([-1, 1])
# Fit the model and score on testing data
percent_score = cross_val_score(model, X, y, cv=5)
print(np.mean(percent_score))
# SPECTROGRAMS OF HEARTBEAT DATA
# Import the stft function
from librosa.core import stft
# Prepare the STFT
HOP_LENGTH = 2**4
spec = stft(audio, hop_length=HOP_LENGTH, n_fft=2**7)
from librosa.core import amplitude_to_db
from librosa.display import specshow
# Convert into decibels
spec_db = amplitude_to_db(spec)
# Compare the raw audio to the spectrogram of the audio
fig, axs = plt.subplots(2, 1, figsize=(10, 10), sharex=True)
axs[0].plot(time, audio)
specshow(spec_db, sr=sfreq, x_axis='time', y_axis='hz', hop_length=HOP_LENGTH)
plt.show()
# ENGINEERING SPECIAL FEATURES
import librosa as lr
# Calculate the spectral centroid and bandwidth for the spectrogram
bandwidths = lr.feature.spectral_bandwidth(S=spec)[0]
centroids = lr.feature.spectral_centroid(S=spec)[0]
from librosa.core import amplitude_to_db
from librosa.display import specshow
# Convert spectrogram to decibels for visualization
spec_db = amplitude_to_db(spec)
# Display these features on top of the spectrogram
fig, ax = plt.subplots(figsize=(10, 5))
ax = specshow(spec_db, x_axis='time', y_axis='hz', hop_length=HOP_LENGTH)
ax.plot(times_spec, centroids)
ax.fill_between(times_spec, centroids - bandwidths / 2, centroids + bandwidths / 2, alpha=.5)
ax.set(ylim=[None, 6000])
plt.show()
# COMBINING MANY FEATURES INTO A CLASSIFIER
# Loop through each spectrogram
bandwidths = []
centroids = []
for spec in spectrograms:
# Calculate the mean spectral bandwidth
this_mean_bandwidth = np.mean(lr.feature.spectral_bandwidth(S=spec))
# Calculate the mean spectral centroid
this_mean_centroid = np.mean(lr.feature.spectral_centroid(S=spec))
# Collect the values
bandwidths.append(this_mean_bandwidth)
centroids.append(this_mean_centroid)
# Create X and y arrays
X = np.column_stack([means, stds, maxs, tempo_mean, tempo_max, tempo_std, bandwidths, centroids])
y = labels.reshape([-1, 1])
# Fit the model and score on testing data
percent_score = cross_val_score(model, X, y, cv=5)
print(np.mean(percent_score))
```
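The rectify-then-smooth envelope step above is easy to sanity-check on a synthetic signal (a self-contained sketch, not tied to the heartbeat data):
```
import numpy as np
import pandas as pd

t = np.linspace(0, 1, 2000)
sig = pd.Series(np.sin(2 * np.pi * 50 * t) * np.exp(-3 * t))  # decaying tone standing in for one audio trace

envelope = sig.apply(np.abs).rolling(50).mean()  # rectify, then smooth with a rolling mean
print(float(envelope.max()), float(envelope.dropna().mean()))
```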
# Predicting Time Series Data (Regression)
```
# INTRO THE DATASET (LINE VS COLOR SCATTERPLOT)
# Plot the raw values over time
prices.plot()
plt.show()
# Scatterplot with one company per axis
prices.plot.scatter('EBAY', 'YHOO')
plt.show()
# Scatterplot with color relating to time
prices.plot.scatter('EBAY', 'YHOO', c=prices.index,
cmap=plt.cm.viridis, colorbar=False)
plt.show()
# FITTING A SIMPLE REGRESSION MODEL
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
# Use stock symbols to extract training data
X = all_prices[['EBAY', 'NVDA', "YHOO"]]
y = all_prices[['AAPL']]
# Fit and score the model with cross-validation
scores = cross_val_score(Ridge(), X, y, cv=3)
print(scores)
# VISUALIZE PREDICTED VALUES
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# Split our data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=.8, shuffle=False, random_state=1)
# Fit our model and generate predictions
model = Ridge()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
score = r2_score(y_test, predictions)
print(score)
# Visualize our predictions along with the "true" values, and print the score
fig, ax = plt.subplots(figsize=(15, 5))
ax.plot(y_test, color='k', lw=3)
ax.plot(predictions, color='r', lw=2)
plt.show()
# VISUALIZE MESSY DATA
# Visualize the dataset
prices.plot(legend=False)
plt.tight_layout()
plt.show()
# Count the missing values of each time series
missing_values = prices.isnull().sum()
print(missing_values)
# IMPUTE MISSING VALUES
# Create a function we'll use to interpolate and plot
def interpolate_and_plot(prices, interpolation):
# Create a boolean mask for missing values
missing_values = prices.isna()
# Interpolate the missing values
prices_interp = prices.interpolate(interpolation)
# Plot the results, highlighting the interpolated values in black
fig, ax = plt.subplots(figsize=(10, 5))
prices_interp.plot(color='k', alpha=.6, ax=ax, legend=False)
# Now plot the interpolated values on top in red
prices_interp[missing_values].plot(ax=ax, color='r', lw=3, legend=False)
plt.show()
# Interpolate using the latest non-missing value
interpolation_type = 'zero'
interpolate_and_plot(prices, interpolation_type)
# Interpolate linearly
interpolation_type = 'linear'
interpolate_and_plot(prices, interpolation_type)
# Interpolate with a quadratic function
interpolation_type = 'quadratic'
interpolate_and_plot(prices, interpolation_type)
# TRANSFORMING RAW DATA
# Your custom function
def percent_change(series):
# Collect all *but* the last value of this window, then the final value
previous_values = series[:-1]
last_value = series[-1]
# Calculate the % difference between the last value and the mean of earlier values
percent_change = (last_value - np.mean(previous_values)) / np.mean(previous_values)
return percent_change
# Apply your custom function and plot
prices_perc = prices.rolling(20).aggregate(percent_change)
prices_perc.loc["2014":"2015"].plot()
plt.show()
# HANDLING OUTLIERS
def replace_outliers(series):
# Calculate the absolute difference of each timepoint from the series mean
absolute_differences_from_mean = np.abs(series - np.mean(series))
# Calculate a mask for the differences that are > 3 standard deviations from zero
this_mask = absolute_differences_from_mean > (np.std(series) * 3)
    # Replace these values with the median across the data
series[this_mask] = np.nanmedian(series)
return series
# Apply your preprocessing function to the timeseries and plot the results
prices_perc = prices_perc.apply(replace_outliers)
prices_perc.loc["2014":"2015"].plot()
plt.show()
# ENGINEERING MULTIPLE ROLLING FEATURES AT ONCE
# Define a rolling window with Pandas, excluding the right-most datapoint of the window
prices_perc_rolling = prices_perc.rolling(20, min_periods=5, closed='right')
# Define the features you'll calculate for each window
features_to_calculate = [np.min, np.max, np.mean, np.std]
# Calculate these features for your rolling window object
features = prices_perc_rolling.aggregate(features_to_calculate)
# Plot the results
ax = features.loc[:"2011-01"].plot()
prices_perc.loc[:"2011-01"].plot(ax=ax, color='k', alpha=.2, lw=3)
ax.legend(loc=(1.01, .6))
plt.show()
# PERCENTILES AND PARTIAL FUNCTIONS
# Import partial from functools
from functools import partial
percentiles = [1, 10, 25, 50, 75, 90, 99]
# Use a list comprehension to create a partial function for each quantile
percentile_functions = [partial(np.percentile, q=percentile) for percentile in percentiles]
# Calculate each of these quantiles on the data using a rolling window
prices_perc_rolling = prices_perc.rolling(20, min_periods=5, closed='right')
features_percentiles = prices_perc_rolling.aggregate(percentile_functions)
# Plot a subset of the result
ax = features_percentiles.loc[:"2011-01"].plot(cmap=plt.cm.viridis)
ax.legend(percentiles, loc=(1.01, .5))
plt.show()
# DATETIME DAY WEEK MONTH
# Extract date features from the data, add them as columns
prices_perc['day_of_week'] = prices_perc.index.weekday
prices_perc['week_of_year'] = prices_perc.index.week
prices_perc['month_of_year'] = prices_perc.index.month
# Print prices_perc
print(prices_perc)
```
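To see what the custom `percent_change` aggregation above computes for a single window, a quick check on a hand-picked window:
```
import numpy as np

window = np.array([1.0, 1.0, 1.0, 2.0])
# The mean of the earlier values is 1.0 and the last value is 2.0, so the result is (2 - 1) / 1 = 1.0
print(percent_change(window))
```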
# Validating and Inspecting Time Series Models
```
# CREATING TIME SHIFTED FEATURES
# These are the "time lags"
shifts = np.arange(1, 11).astype(int)
# Use a dictionary comprehension to create name: value pairs, one pair per shift
shifted_data = {"lag_{}_day".format(day_shift): prices_perc.shift(day_shift) for day_shift in shifts}
# Convert into a DataFrame for subsequent use
prices_perc_shifted = pd.DataFrame(shifted_data)
# Plot the first 100 samples of each
ax = prices_perc_shifted.iloc[:100].plot(cmap=plt.cm.viridis)
prices_perc.iloc[:100].plot(color='r', lw=2)
ax.legend(loc='best')
plt.show()
# SPECIAL CASE: AUTO REGRESSIVE MODELS
# Replace missing values with the median for each column
X = prices_perc_shifted.fillna(np.nanmedian(prices_perc_shifted))
y = prices_perc.fillna(np.nanmedian(prices_perc))
# Fit the model
model = Ridge()
model.fit(X, y)
# VISUALIZE REGRESSION COEFFICIENTS (SHOWS THE (N-1)TH DAY HAS THE GREATEST EFFECT ON THE NTH DAY)
def visualize_coefficients(coefs, names, ax):
# Make a bar plot for the coefficients, including their names on the x-axis
ax.bar(names, coefs)
ax.set(xlabel='Coefficient name', ylabel='Coefficient value')
# Set formatting so it looks nice
plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
return ax
# Visualize the output data up to "2011-01"
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
y.loc[:'2011-01'].plot(ax=axs[0])
# Run the function to visualize model's coefficients
visualize_coefficients(model.coef_, prices_perc_shifted.columns, ax=axs[1])
plt.show()
# AUTO REGRESSION WITH A SMOOTHER TIME SERIES
# Visualize the output data up to "2011-01"
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
y.loc[:'2011-01'].plot(ax=axs[0])
# Run the function to visualize model's coefficients
visualize_coefficients(model.coef_, prices_perc_shifted.columns, ax=axs[1])
plt.show()
# VISUALIZING CROSS VALIDATION WITH SHUFFLING
# Import ShuffleSplit and create the cross-validation object
from sklearn.model_selection import ShuffleSplit
cv = ShuffleSplit(n_splits=10, random_state=1)
# Iterate through CV splits
results = []
for tr, tt in cv.split(X, y):
# Fit the model on training data
model.fit(X[tr], y[tr])
# Generate predictions on the test data, score the predictions, and collect
prediction = model.predict(X[tt])
score = r2_score(y[tt], prediction)
results.append((prediction, score, tt))
# Custom function to quickly visualize predictions
visualize_predictions(results)
# VISUALIZING CROSS VALIDATION WITHOUT SHUFFLING
# Create KFold cross-validation object
from sklearn.model_selection import KFold
cv = KFold(n_splits=10, shuffle=False)  # random_state is omitted because it has no effect when shuffle=False
# Iterate through CV splits
results = []
for tr, tt in cv.split(X, y):
# Fit the model on training data
model.fit(X[tr],y[tr])
# Generate predictions on the test data and collect
prediction = model.predict(X[tt])
results.append((prediction, tt))
# Custom function to quickly visualize predictions
visualize_predictions(results)
# VISUALIZE TIME BASED CROSS VALIDATION
# Import TimeSeriesSplit
from sklearn.model_selection import TimeSeriesSplit
# Create time-series cross-validation object
cv = TimeSeriesSplit(n_splits=10)
# Iterate through CV splits
fig, ax = plt.subplots()
for ii, (tr, tt) in enumerate(cv.split(X, y)):
# Plot the training data on each iteration, to see the behavior of the CV
ax.plot(tr, ii + y[tr])
ax.set(title='Training data on each CV iteration', ylabel='CV iteration')
plt.show()
# BOOTSTRAPPING A CONFIDENCE INTERVAL
from sklearn.utils import resample
def bootstrap_interval(data, percentiles=(2.5, 97.5), n_boots=100):
"""Bootstrap a confidence interval for the mean of columns of a 2-D dataset."""
# Create our empty array to fill the results
bootstrap_means = np.zeros([n_boots, data.shape[-1]])
for ii in range(n_boots):
# Generate random indices for our data *with* replacement, then take the sample mean
random_sample = resample(data)
bootstrap_means[ii] = random_sample.mean(axis=0)
# Compute the percentiles of choice for the bootstrapped means
percentiles = np.percentile(bootstrap_means, percentiles, axis=0)
return percentiles
# CALCULATING VARIABILITY IN MODEL COEFFICIENTS
# Iterate through CV splits
n_splits = 100
cv = TimeSeriesSplit(n_splits=n_splits)
# Create empty array to collect coefficients
coefficients = np.zeros([n_splits, X.shape[1]])
for ii, (tr, tt) in enumerate(cv.split(X, y)):
# Fit the model on training data and collect the coefficients
model.fit(X[tr], y[tr])
coefficients[ii] = model.coef_
# Calculate a confidence interval around each coefficient
bootstrapped_interval = bootstrap_interval(coefficients)
# Plot it
fig, ax = plt.subplots()
ax.scatter(feature_names, bootstrapped_interval[0], marker='_', lw=3)
ax.scatter(feature_names, bootstrapped_interval[1], marker='_', lw=3)
ax.set(title='95% confidence interval for model coefficients')
plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
# VISUALIZE MODEL SCORE VARIABILITY OVER TIME (to see if predictions get better over time)
from sklearn.model_selection import cross_val_score
# Generate scores for each split to see how the model performs over time
scores = cross_val_score(model, X, y, cv=cv, scoring=my_pearsonr)
# Convert to a Pandas Series object
scores_series = pd.Series(scores, index=times_scores, name='score')
# Bootstrap a rolling confidence interval for the mean score
scores_lo = scores_series.rolling(20).aggregate(partial(bootstrap_interval, percentiles=2.5))
scores_hi = scores_series.rolling(20).aggregate(partial(bootstrap_interval, percentiles=97.5))
# Plot the results
fig, ax = plt.subplots()
scores_lo.plot(ax=ax, label="Lower confidence interval")
scores_hi.plot(ax=ax, label="Upper confidence interval")
ax.legend()
plt.show()
# ACCOUNTING FOR NON-STATIONARITY
# Pre-initialize window sizes
window_sizes = [25, 50, 75, 100]
# Create an empty DataFrame to collect the scores
all_scores = pd.DataFrame(index=times_scores)
# Generate scores for each split to see how the model performs over time
for window in window_sizes:
# Create cross-validation object using a limited lookback window
cv = TimeSeriesSplit(n_splits=100, max_train_size=window)
# Calculate scores across all CV splits and collect them in a DataFrame
this_scores = cross_val_score(model, X, y, cv=cv, scoring=my_pearsonr)
all_scores['Length {}'.format(window)] = this_scores
# Visualize the scores
ax = all_scores.rolling(10).mean().plot(cmap=plt.cm.coolwarm)
ax.set(title='Scores for multiple windows', ylabel='Correlation (r)')
plt.show()
```
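To make the `bootstrap_interval` helper above concrete, a small synthetic check (the fake coefficients are purely illustrative):
```
import numpy as np

rng = np.random.RandomState(0)
fake_coefficients = rng.normal(loc=[0.2, 0.5, 0.8], scale=0.05, size=(100, 3))  # 100 CV splits, 3 features

lo, hi = bootstrap_interval(fake_coefficients)
print(lo)  # lower 2.5% bound of the bootstrapped mean, one entry per column
print(hi)  # upper 97.5% bound, one entry per column
```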
# PLOTTING A TIME SERIES 1
# Print the first 5 rows of data
print(data.head())
# Print the first 5 rows of data2
print(data2.head())
# Plot the time series in each dataset
fig, axs = plt.subplots(2, 1, figsize=(5, 10))
data.iloc[:1000].plot(y="data_values", ax=axs[0])
data2.iloc[:1000].plot(y="data_values", ax=axs[1])
plt.show()
# PLOTTING A TIME SERIES 2
# Plot the time series in each dataset
fig, axs = plt.subplots(2, 1, figsize=(5, 10))
data.iloc[:1000].plot(x='time', y='data_values', ax=axs[0])
data2.iloc[:1000].plot(x='time', y='data_values', ax=axs[1])
plt.show()
# FITTING A SIMPLE MODEL
# Print the first 5 rows for inspection
print(data.head())
from sklearn.svm import LinearSVC
# Construct data for the model
X = data[['petal length (cm)', 'petal width (cm)']]
y = data[['target']]
# Fit the model
model = LinearSVC()
model.fit(X, y)
# PREDICTING USING A CLASSIFICATION MODEL
# Create input array
X_predict = targets[['petal length (cm)', 'petal width (cm)']]
# Predict with the model
predictions = model.predict(X_predict)
print(predictions)
# Visualize predictions and actual values
plt.scatter(X_predict['petal length (cm)'], X_predict['petal width (cm)'],
c=predictions, cmap=plt.cm.coolwarm)
plt.title("Predicted class values")
plt.show()
# FITTING A SIMPLE MODEL REGRESSION
from sklearn import linear_model
# Prepare input and output DataFrames
X = boston[['AGE']]
y = boston[['RM']]
# Fit the model
model = linear_model.LinearRegression()
model.fit(X, y)
# PREDICTING USING REGRESSION and RESHAPE
# Generate predictions with the model using those inputs
predictions = model.predict(new_inputs.reshape([-1, 1]))
# Visualize the inputs and predicted values
plt.scatter(new_inputs, predictions, color='r', s=3)
plt.xlabel('inputs')
plt.ylabel('predictions')
plt.show()
# INSPECTING AUDIO DATA
import librosa as lr
from glob import glob
# List all the wav files in the folder
audio_files = glob(data_dir + '/*.wav')
# Read in the first audio file, create the time array
audio, sfreq = lr.load(audio_files[0])
time = np.arange(0, len(audio)) / sfreq
# Plot audio over time
fig, ax = plt.subplots()
ax.plot(time, audio)
ax.set(xlabel='Time (s)', ylabel='Sound Amplitude')
plt.show()
# INSPECTING REGRESSION DATA
# Read in the data
data = pd.read_csv('prices.csv', index_col=0)
# Convert the index of the DataFrame to datetime
data.index = pd.to_datetime(data.index)
print(data.head())
# Loop through each column, plot its values over time
fig, ax = plt.subplots()
for column in data:
data[column].plot(ax=ax, label=column)
ax.legend()
plt.show()
# AVERAGING ACROSS TIMESERIES (abnormal vs normal heartbeats)
fig, axs = plt.subplots(3, 2, figsize=(15, 7), sharex=True, sharey=True)
# Calculate the time array
time = np.arange(normal.shape[0]) / sfreq
# Stack the normal/abnormal audio so you can loop and plot
stacked_audio = np.hstack([normal, abnormal]).T
# Loop through each audio file / ax object and plot
# .T.ravel() transposes the array, then unravels it into a 1-D vector for looping
for iaudio, ax in zip(stacked_audio, axs.T.ravel()):
ax.plot(time, iaudio)
show_plot_and_make_titles()
# AVERAGING ACROSS TIMESERIES (abnormal vs normal heartbeats)
# Average across the audio files of each DataFrame
mean_normal = np.mean(normal, axis=1)
mean_abnormal = np.mean(abnormal, axis=1)
# Plot each average over time
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3), sharey=True)
ax1.plot(time, mean_normal)
ax1.set(title="Normal Data")
ax2.plot(time, mean_abnormal)
ax2.set(title="Abnormal Data")
plt.show()
# BUILD A CLASSIFICATION MODEL
from sklearn.svm import LinearSVC
# Initialize and fit the model
model = LinearSVC()
model.fit(X_train, y_train)
# Generate predictions and score them manually
predictions = model.predict(X_test)
print(sum(predictions == y_test.squeeze()) / len(y_test))
# CALCULATING THE ENVELOPE OF SOUND
# Plot the raw data first
audio.plot(figsize=(10, 5))
plt.show()
# Rectify the audio signal
audio_rectified = audio.apply(np.abs)
# Plot the result
audio_rectified.plot(figsize=(10, 5))
plt.show()
# Smooth by applying a rolling mean
audio_rectified_smooth = audio_rectified.rolling(50).mean()
# Plot the result
audio_rectified_smooth.plot(figsize=(10, 5))
plt.show()
# CALCULATING FEATURES FROM THE ENVELOPE
# Calculate stats
means = np.mean(audio_rectified_smooth, axis=0)
stds = np.std(audio_rectified_smooth, axis=0)
maxs = np.max(audio_rectified_smooth, axis=0)
# Create the X and y arrays
X = np.column_stack([means, stds, maxs])
y = labels.reshape([-1, 1])
# Fit the model and score on testing data
from sklearn.model_selection import cross_val_score
percent_score = cross_val_score(model, X, y, cv=5)
print(np.mean(percent_score))
# DERIVATIVE FEATURES: THE TEMPOGRAM
# Calculate the tempo of the sounds
tempos = []
for col, i_audio in audio.items():
tempos.append(lr.beat.tempo(i_audio.values, sr=sfreq, hop_length=2**6, aggregate=None))
# Convert the list to an array so you can manipulate it more easily
tempos = np.array(tempos)
# Calculate statistics of each tempo
tempos_mean = tempos.mean(axis=-1)
tempos_std = tempos.std(axis=-1)
tempos_max = tempos.max(axis=-1)
# Create the X and y arrays
X = np.column_stack([means, stds, maxs, tempos_mean, tempos_std, tempos_max])
y = labels.reshape([-1, 1])
# Fit the model and score on testing data
percent_score = cross_val_score(model, X, y, cv=5)
print(np.mean(percent_score))
# SPECTROGRAMS OF HEARTBEAT DATA
# Import the stft function
from librosa.core import stft
# Prepare the STFT
HOP_LENGTH = 2**4
spec = stft(audio, hop_length=HOP_LENGTH, n_fft=2**7)
from librosa.core import amplitude_to_db
from librosa.display import specshow
# Convert into decibels
spec_db = amplitude_to_db(spec)
# Compare the raw audio to the spectrogram of the audio
fig, axs = plt.subplots(2, 1, figsize=(10, 10), sharex=True)
axs[0].plot(time, audio)
specshow(spec_db, sr=sfreq, x_axis='time', y_axis='hz', hop_length=HOP_LENGTH)
plt.show()
# ENGINEERING SPECIAL FEATURES
import librosa as lr
# Calculate the spectral centroid and bandwidth for the spectrogram
bandwidths = lr.feature.spectral_bandwidth(S=spec)[0]
centroids = lr.feature.spectral_centroid(S=spec)[0]
from librosa.core import amplitude_to_db
from librosa.display import specshow
# Convert spectrogram to decibels for visualization
spec_db = amplitude_to_db(spec)
# Display these features on top of the spectrogram
fig, ax = plt.subplots(figsize=(10, 5))
ax = specshow(spec_db, x_axis='time', y_axis='hz', hop_length=HOP_LENGTH)
ax.plot(times_spec, centroids)
ax.fill_between(times_spec, centroids - bandwidths / 2, centroids + bandwidths / 2, alpha=.5)
ax.set(ylim=[None, 6000])
plt.show()
# COMBINING MANY FEATURES INTO A CLASSIFIER
# Loop through each spectrogram
bandwidths = []
centroids = []
for spec in spectrograms:
# Calculate the mean spectral bandwidth
this_mean_bandwidth = np.mean(lr.feature.spectral_bandwidth(S=spec))
# Calculate the mean spectral centroid
this_mean_centroid = np.mean(lr.feature.spectral_centroid(S=spec))
# Collect the values
bandwidths.append(this_mean_bandwidth)
centroids.append(this_mean_centroid)
# Create X and y arrays
X = np.column_stack([means, stds, maxs, tempo_mean, tempo_max, tempo_std, bandwidths, centroids])
y = labels.reshape([-1, 1])
# Fit the model and score on testing data
percent_score = cross_val_score(model, X, y, cv=5)
print(np.mean(percent_score))
# INTRO THE DATASET (LINE VS COLOR SCATTERPLOT)
# Plot the raw values over time
prices.plot()
plt.show()
# Scatterplot with one company per axis
prices.plot.scatter('EBAY', 'YHOO')
plt.show()
# Scatterplot with color relating to time
prices.plot.scatter('EBAY', 'YHOO', c=prices.index,
cmap=plt.cm.viridis, colorbar=False)
plt.show()
# FITTING A SIMPLE REGRESSION MODEL
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
# Use stock symbols to extract training data
X = all_prices[['EBAY', 'NVDA', "YHOO"]]
y = all_prices[['AAPL']]
# Fit and score the model with cross-validation
scores = cross_val_score(Ridge(), X, y, cv=3)
print(scores)
# VISUALIZE PREDICTED VALUES
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# Split our data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=.8, shuffle=False, random_state=1)
# Fit our model and generate predictions
model = Ridge()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
score = r2_score(y_test, predictions)
print(score)
# Visualize our predictions along with the "true" values, and print the score
fig, ax = plt.subplots(figsize=(15, 5))
ax.plot(y_test, color='k', lw=3)
ax.plot(predictions, color='r', lw=2)
plt.show()
# VISUALIZE MESSY DATA
# Visualize the dataset
prices.plot(legend=False)
plt.tight_layout()
plt.show()
# Count the missing values of each time series
missing_values = prices.isnull().sum()
print(missing_values)
# IMPUTE MISSING VALUES
# Create a function we'll use to interpolate and plot
def interpolate_and_plot(prices, interpolation):
# Create a boolean mask for missing values
missing_values = prices.isna()
# Interpolate the missing values
prices_interp = prices.interpolate(interpolation)
# Plot the results, highlighting the interpolated values in black
fig, ax = plt.subplots(figsize=(10, 5))
prices_interp.plot(color='k', alpha=.6, ax=ax, legend=False)
# Now plot the interpolated values on top in red
prices_interp[missing_values].plot(ax=ax, color='r', lw=3, legend=False)
plt.show()
# Interpolate using the latest non-missing value
interpolation_type = 'zero'
interpolate_and_plot(prices, interpolation_type)
# Interpolate linearly
interpolation_type = 'linear'
interpolate_and_plot(prices, interpolation_type)
# Interpolate with a quadratic function
interpolation_type = 'quadratic'
interpolate_and_plot(prices, interpolation_type)
# TRANSFORMING RAW DATA
# Your custom function
def percent_change(series):
# Collect all *but* the last value of this window, then the final value
previous_values = series[:-1]
last_value = series[-1]
# Calculate the % difference between the last value and the mean of earlier values
percent_change = (last_value - np.mean(previous_values)) / np.mean(previous_values)
return percent_change
# Apply your custom function and plot
prices_perc = prices.rolling(20).aggregate(percent_change)
prices_perc.loc["2014":"2015"].plot()
plt.show()
# HANDLING OUTLIERS
def replace_outliers(series):
# Calculate the absolute difference of each timepoint from the series mean
absolute_differences_from_mean = np.abs(series - np.mean(series))
# Calculate a mask for the differences that are > 3 standard deviations from zero
this_mask = absolute_differences_from_mean > (np.std(series) * 3)
# Replace these values with the median accross the data
series[this_mask] = np.nanmedian(series)
return series
# Apply your preprocessing function to the timeseries and plot the results
prices_perc = prices_perc.apply(replace_outliers)
prices_perc.loc["2014":"2015"].plot()
plt.show()
# ENGINEERING MULTIPLE ROLLING FEATURES AT ONCE
# Define a rolling window with Pandas, excluding the right-most datapoint of the window
prices_perc_rolling = prices_perc.rolling(20, min_periods=5, closed='right')
# Define the features you'll calculate for each window
features_to_calculate = [np.min, np.max, np.mean, np.std]
# Calculate these features for your rolling window object
features = prices_perc_rolling.aggregate(features_to_calculate)
# Plot the results
ax = features.loc[:"2011-01"].plot()
prices_perc.loc[:"2011-01"].plot(ax=ax, color='k', alpha=.2, lw=3)
ax.legend(loc=(1.01, .6))
plt.show()
# PERCENTILES AND PARTIAL FUNCTIONS
# Import partial from functools
from functools import partial
percentiles = [1, 10, 25, 50, 75, 90, 99]
# Use a list comprehension to create a partial function for each quantile
percentile_functions = [partial(np.percentile, q=percentile) for percentile in percentiles]
# Calculate each of these quantiles on the data using a rolling window
prices_perc_rolling = prices_perc.rolling(20, min_periods=5, closed='right')
features_percentiles = prices_perc_rolling.aggregate(percentile_functions)
# Plot a subset of the result
ax = features_percentiles.loc[:"2011-01"].plot(cmap=plt.cm.viridis)
ax.legend(percentiles, loc=(1.01, .5))
plt.show()
# DATETIME DAY WEEK MONTH
# Extract date features from the data, add them as columns
prices_perc['day_of_week'] = prices_perc.index.weekday
prices_perc['week_of_year'] = prices_perc.index.week
prices_perc['month_of_year'] = prices_perc.index.month
# Print prices_perc
print(prices_perc)
# CREATING TIME SHIFTED FEATURES
# These are the "time lags"
shifts = np.arange(1, 11).astype(int)
# Use a dictionary comprehension to create name: value pairs, one pair per shift
shifted_data = {"lag_{}_day".format(day_shift): prices_perc.shift(day_shift) for day_shift in shifts}
# Convert into a DataFrame for subsequent use
prices_perc_shifted = pd.DataFrame(shifted_data)
# Plot the first 100 samples of each
ax = prices_perc_shifted.iloc[:100].plot(cmap=plt.cm.viridis)
prices_perc.iloc[:100].plot(color='r', lw=2)
ax.legend(loc='best')
plt.show()
# SPECIAL CASE: AUTO REGRESSIVE MODELS
# Replace missing values with the median for each column
X = prices_perc_shifted.fillna(np.nanmedian(prices_perc_shifted))
y = prices_perc.fillna(np.nanmedian(prices_perc))
# Fit the model
model = Ridge()
model.fit(X, y)
# VISUALIZE REGRESSION COEFFICIENTS (SHOWS N-1TH DAY HAS GRAETEST EFECT ON NTH DAY)
def visualize_coefficients(coefs, names, ax):
# Make a bar plot for the coefficients, including their names on the x-axis
ax.bar(names, coefs)
ax.set(xlabel='Coefficient name', ylabel='Coefficient value')
# Set formatting so it looks nice
plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
return ax
# Visualize the output data up to "2011-01"
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
y.loc[:'2011-01'].plot(ax=axs[0])
# Run the function to visualize model's coefficients
visualize_coefficients(model.coef_, prices_perc_shifted.columns, ax=axs[1])
plt.show()
# AUTO REGRESSION WITH A SMOOTHER TIME SERIES
# Visualize the output data up to "2011-01"
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
y.loc[:'2011-01'].plot(ax=axs[0])
# Run the function to visualize model's coefficients
visualize_coefficients(model.coef_, prices_perc_shifted.columns, ax=axs[1])
plt.show()
# VISUALIZING CROSS VALIDATION WITH SHUFFLING
# Import ShuffleSplit and create the cross-validation object
from sklearn.model_selection import ShuffleSplit
cv = ShuffleSplit(n_splits=10, random_state=1)
# Iterate through CV splits
results = []
for tr, tt in cv.split(X, y):
# Fit the model on training data
model.fit(X[tr], y[tr])
# Generate predictions on the test data, score the predictions, and collect
prediction = model.predict(X[tt])
score = r2_score(y[tt], prediction)
results.append((prediction, score, tt))
# Custom function to quickly visualize predictions
visualize_predictions(results)
# VISUALIZING CROSS VALIDATION WITHOUT SHUFFLING
# Create KFold cross-validation object
from sklearn.model_selection import KFold
cv = KFold(n_splits=10, shuffle=False, random_state=1)
# Iterate through CV splits
results = []
for tr, tt in cv.split(X, y):
# Fit the model on training data
model.fit(X[tr],y[tr])
# Generate predictions on the test data and collect
prediction = model.predict(X[tt])
results.append((prediction, tt))
# Custom function to quickly visualize predictions
visualize_predictions(results)
# VISUALIZE TIME BASED CROSS VALIDATION
# Import TimeSeriesSplit
from sklearn.model_selection import TimeSeriesSplit
# Create time-series cross-validation object
cv = TimeSeriesSplit(n_splits= 10)
# Iterate through CV splits
fig, ax = plt.subplots()
for ii, (tr, tt) in enumerate(cv.split(X, y)):
# Plot the training data on each iteration, to see the behavior of the CV
ax.plot(tr, ii + y[tr])
ax.set(title='Training data on each CV iteration', ylabel='CV iteration')
plt.show()
# BOOTSTRAPPING A CONFIDENCE INTERVAL
from sklearn.utils import resample
def bootstrap_interval(data, percentiles=(2.5, 97.5), n_boots=100):
"""Bootstrap a confidence interval for the mean of columns of a 2-D dataset."""
# Create our empty array to fill the results
bootstrap_means = np.zeros([n_boots, data.shape[-1]])
for ii in range(n_boots):
# Generate random indices for our data *with* replacement, then take the sample mean
random_sample = resample(data)
bootstrap_means[ii] = random_sample.mean(axis=0)
# Compute the percentiles of choice for the bootstrapped means
percentiles = np.percentile(bootstrap_means, percentiles, axis=0)
return percentiles
# CALCULATING VARIABILITY INMODEL COEFFICIENTS
# Iterate through CV splits
n_splits = 100
cv = TimeSeriesSplit(n_splits=n_splits)
# Create empty array to collect coefficients
coefficients = np.zeros([n_splits, X.shape[1]])
for ii, (tr, tt) in enumerate(cv.split(X, y)):
# Fit the model on training data and collect the coefficients
model.fit(X[tr], y[tr])
coefficients[ii] = model.coef_
# Calculate a confidence interval around each coefficient
bootstrapped_interval = bootstrap_interval(coefficients)
# Plot it
fig, ax = plt.subplots()
ax.scatter(feature_names, bootstrapped_interval[0], marker='_', lw=3)
ax.scatter(feature_names, bootstrapped_interval[1], marker='_', lw=3)
ax.set(title='95% confidence interval for model coefficients')
plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
# VISUALIZE MODEL SCORE VARIABILITY OVER TIME (to see if predictions get better over time)
from sklearn.model_selection import cross_val_score
# Generate scores for each split to see how the model performs over time
scores = cross_val_score(model, X, y, cv=cv, scoring=my_pearsonr)
# Convert to a Pandas Series object
scores_series = pd.Series(scores, index=times_scores, name='score')
# Bootstrap a rolling confidence interval for the mean score
scores_lo = scores_series.rolling(20).aggregate(partial(bootstrap_interval, percentiles=2.5))
scores_hi = scores_series.rolling(20).aggregate(partial(bootstrap_interval, percentiles=97.5))
# Plot the results
fig, ax = plt.subplots()
scores_lo.plot(ax=ax, label="Lower confidence interval")
scores_hi.plot(ax=ax, label="Upper confidence interval")
ax.legend()
plt.show()
# ACCOUNTING FOR NON-STATIONARITY
# Pre-initialize window sizes
window_sizes = [25, 50, 75, 100]
# Create an empty DataFrame to collect the scores
all_scores = pd.DataFrame(index=times_scores)
# Generate scores for each split to see how the model performs over time
for window in window_sizes:
# Create cross-validation object using a limited lookback window
cv = TimeSeriesSplit(n_splits=100, max_train_size=window)
# Calculate scores across all CV splits and collect them in a DataFrame
this_scores = cross_val_score(model, X, y, cv=cv, scoring=my_pearsonr)
all_scores['Length {}'.format(window)] = this_scores
# Visualize the scores
ax = all_scores.rolling(10).mean().plot(cmap=plt.cm.coolwarm)
ax.set(title='Scores for multiple windows', ylabel='Correlation (r)')
plt.show()
```
import numpy as np
```
# Estimating income processes
Consider $N$ households indexed by $i$ who are in the labor market for $T$ periods indexed by $t \in \{1,2,\dots,T\}$.
Their **wage income** is stochastic and given by
$$
\begin{aligned}
P_{i,t} &= \psi_{i,t} P_{i,t-1},\\
\tilde{Y}_{i,t} &= \xi_{i,t} P_{i,t},\\
Y_{i,t} &= \begin{cases}
0 & \text{if } \mu_{i,t} < \pi\\
\tilde{Y}_{i,t} & \text{else}
\end{cases} \\
\psi_{i,t} &\sim \text{LogNormal}(-0.5\sigma_{\psi}^2,\sigma_{\psi})\\
\xi_{i,t} &\sim \text{LogNormal}(-0.5\sigma_{\xi}^2,\sigma_{\xi})\\
\mu_{i,t} &\sim \text{Uniform}(0,1)\\
P_{i,0} &= 1
\end{aligned}
$$
where
- $\sigma_{\psi}$ is the standard deviation of the *permanent* shocks, $\psi_{i,t}$
- $\sigma_{\xi}$ is the standard deviation of the *transitory* shocks, $\xi_{i,t}$
- $\pi$ is the risk of unemployment.
**The data you have access to is:**
```
dataY = np.load('dataY.npy')
T,N = dataY.shape
```
**Question 1:** Calculate income growth rates as log-changes
$$
\Delta \log(Y_{i,t}) \equiv \begin{cases}
\log(Y_{i,t})-\log(Y_{i,t-1}) & \text{if } Y_{i,t}>0 \text{ and } Y_{i,t-1}>0\\
\text{NaN} & \text{else}
\end{cases}
$$
where $\text{NaN}$ is *not-a-number* (i.e. `np.nan`).
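A minimal sketch of one way to compute these growth rates with NumPy, assuming `dataY` has shape `(T, N)` as loaded above (the helper name `growth_rates` is illustrative):
```
import numpy as np

def growth_rates(Y):
    """Log-changes of income, with NaN wherever income is zero in t or t-1."""
    with np.errstate(divide='ignore'):            # log(0) -> -inf, masked below
        logY = np.log(Y)
    dlogY = logY[1:, :] - logY[:-1, :]            # Delta log(Y_it), shape (T-1, N)
    mask = (Y[1:, :] <= 0) | (Y[:-1, :] <= 0)     # zero income in t or t-1
    dlogY[mask] = np.nan
    return dlogY
```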
**Question 2:** Calculate the following 3 statistics from the data
- $s_1^{data}$: Share of observations with $Y_{i,t} = 0$
- $s_2^{data}$: Variance of income growth rate, $\text{Var}(\Delta\log{Y_{i,t}})$
- $s_3^{data}$: Co-variance of income growth rates one period apart, $\text{Cov}(\Delta\log{Y_{i,t}},\Delta\log{Y_{i,t-1}})$
**Question 3:** Simulate the income process using your own choice of $\sigma_{\psi}$, $\sigma_{\xi}$, $\pi$, $T$ and $N$. Calculate the 3 same statistics. Compare with the data statistics.
- $s_1^{sim}(\sigma_{\psi},\sigma_{\xi},\pi)$: Share of observations with $Y_{i,t} = 0$
- $s_2^{sim}(\sigma_{\psi},\sigma_{\xi},\pi)$: Variance of income growth $\text{Var}(\Delta\log{Y_{i,t}})$
- $s_3^{sim}(\sigma_{\psi},\sigma_{\xi},\pi)$: Co-variance of income growth one periods apart $\text{Cov}(\Delta\log{Y_{i,t}},\Delta\log{Y_{i,t-1}})$
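A hedged sketch of how the simulation and the three statistics might be computed; the parameter values, the seed, and the helper names `simulate` and `moments` are illustrative choices, not part of the problem statement:
```
import numpy as np

def simulate(sigma_psi, sigma_xi, pi, T, N, seed=1986):
    """Simulate the income process; returns Y with shape (T, N)."""
    rng = np.random.default_rng(seed)
    psi = rng.lognormal(-0.5*sigma_psi**2, sigma_psi, size=(T, N))
    xi = rng.lognormal(-0.5*sigma_xi**2, sigma_xi, size=(T, N))
    mu = rng.uniform(size=(T, N))
    P = np.cumprod(psi, axis=0)          # P_t = psi_t * P_{t-1}, with P_0 = 1
    Y = xi * P
    Y[mu < pi] = 0.0                     # unemployment spells
    return Y

def moments(Y):
    """Return (share of zeros, Var(dlogY), Cov(dlogY_t, dlogY_{t-1}))."""
    dlogY = growth_rates(Y)              # sketch defined above
    s1 = np.mean(Y == 0)
    s2 = np.nanvar(dlogY)
    a, b = dlogY[1:, :].ravel(), dlogY[:-1, :].ravel()
    keep = ~(np.isnan(a) | np.isnan(b))
    s3 = np.cov(a[keep], b[keep])[0, 1]
    return np.array([s1, s2, s3])

# example usage with illustrative parameter guesses
Y_sim = simulate(sigma_psi=0.1, sigma_xi=0.1, pi=0.05, T=dataY.shape[0], N=50_000)
print(moments(dataY), moments(Y_sim))
```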
**Question 4:** Solve the following minimization problem to estimate $\sigma_{\psi}$, $\sigma_{\xi}$ and $\pi$
$$
\sigma_{\psi}^{\ast},\sigma_{\xi}^{\ast},\pi^{\ast} = \arg \min_{\sigma_{\psi}\geq0,\sigma_{\xi}\geq0,\pi\in[0,1]} (s_1^{sim}-s_1^{data})^2 + (s_2^{sim}-s_2^{data})^2 + (s_3^{sim}-s_3^{data})^2
$$
where for each new guess of $\sigma_{\psi}$, $\sigma_{\xi}$, and $\pi$ you should be re-simulating the data *with the same seed* and re-calculating the 3 statistics.
```
def objective():
pass
# res = optimize.minimize(objective,x,method='L-BFGS-B',bounds=?,args=?,options={'eps':1e-4})
# hint: options={'eps':1e-4} uses a larger step-size when approximating the jacobian, which is useful in this case
```
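One way the objective could be filled in, reusing the illustrative `simulate` and `moments` sketches above and fixing the seed so that every evaluation re-simulates the same shocks:
```
from scipy import optimize

def objective(x, mom_data, T, N):
    sigma_psi, sigma_xi, pi = x
    mom_sim = moments(simulate(sigma_psi, sigma_xi, pi, T, N, seed=1986))
    return np.sum((mom_sim - mom_data)**2)

mom_data = moments(dataY)
x0 = np.array([0.1, 0.1, 0.05])
res = optimize.minimize(objective, x0, args=(mom_data, dataY.shape[0], 50_000),
                        method='L-BFGS-B',
                        bounds=[(0, None), (0, None), (0, 1)],
                        options={'eps': 1e-4})
print(res.x)
```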
# Wealth in the utility function
In the final period, $t=T$, the household solves the following problem
$$
\begin{aligned}
v_{T}(a_{T-1}) & = \max_{c_{T}} \frac{c_T^{1-\rho}}{1-\rho} + \kappa \frac{(a_T+\underline{a})^{1-\sigma}}{1-\sigma}
\\ & \text{s.t.} & \\
a_{T}& = (1+r)a_{T-1} + y - c_T
\end{aligned}
$$
where
- $a_t$ is end-of-period assets in period $t$
- $c_t$ is consumption in period $t$
- $\rho$ is the CRRA-coefficient for consumption utility
- $\sigma$ is the CRRA-coefficient for wealth utility
- $\underline{a}$ is an *additive* scaling factor for wealth utility
- $\kappa$ is a *multiplicative* scaling factor for wealth utility
- $r$ is the rate of return
- $y$ is income
The optimal **consumption function** is denoted $c_t^{\ast}(a_{t-1})$.
The optimal **savings function** is denoted $a_t^{\ast}(a_{t-1}) \equiv (1+r)a_{t-1} + y - c_t^{\ast}(a_{t-1})$.
```
# a. parameters
rho = 2.0
sigma = 1.2
kappa = 0.6
a_ubar = 2.0
r = 0.04
y = 1.0
# b. grids
a_lag_vec = np.linspace(0,300,300)
```
**Question 1:** Find and plot the functions $v_{T}(a_{T-1})$, $c_T^{\ast}(a_{T-1})$, and $a_T^{\ast}(a_{T-1})$.
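A minimal sketch of how the last-period problem could be solved numerically on the grid above, restricting $a_T \geq 0$ for simplicity and maximizing over consumption with a bounded scalar solver (helper names illustrative; plotting is omitted):
```
from scipy import optimize

def utility(c, a, rho=rho, sigma=sigma, kappa=kappa, a_ubar=a_ubar):
    return c**(1-rho)/(1-rho) + kappa*(a+a_ubar)**(1-sigma)/(1-sigma)

def solve_last_period(a_lag_vec):
    v_T = np.empty(a_lag_vec.size)
    c_T = np.empty(a_lag_vec.size)
    for i, a_lag in enumerate(a_lag_vec):
        m = (1+r)*a_lag + y                            # cash-on-hand
        obj = lambda c: -utility(c, m - c)             # a_T = m - c_T >= 0
        sol = optimize.minimize_scalar(obj, bounds=(1e-8, m), method='bounded')
        c_T[i] = sol.x
        v_T[i] = -sol.fun
    return v_T, c_T

v_T, c_T = solve_last_period(a_lag_vec)
a_T = (1+r)*a_lag_vec + y - c_T
```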
In all periods *before the last*, $t < T$, the household solves:
$$
\begin{aligned}
v_{t}(a_{t-1}) & = \max_{c_{t}} \frac{c_t^{1-\rho}}{1-\rho} + \kappa \frac{(a_t+\underline{a})^{1-\sigma}}{1-\sigma} + \beta v_{t+1}(a_t)\\ & \text{s.t.} & \\
a_{t}& = (1+r)a_{t-1} + y - c_t
\end{aligned}
$$
where $\beta$ is the discount factor for future utility.
```
beta = 0.97
T = 20
```
**Question 2:** Find and plot $v_{T-1}(a_{T-2})$ and $c_{T-1}^{\ast}(a_{T-2})$.
**Question 3:** Find $c_t^{\ast}(a_{t-1})$ for $t \in \{0,1,\dots,T\}$ and plot them in a single figure.
Define the saving rate as:
$$
s_t^{\ast}(a_{t-1}) \equiv \frac{a_t - a_{t-1}}{y+ra_{t-1}} = \frac{((1+r)a_{t-1} + y - c_t^{\ast}(a_{t-1})) - a_{t-1}}{y+ra_{t-1}}
$$
**Question 4:** Plot $s_0^{\ast}(a_{-1})$. Do the rich or the poor save the most?
**Question 5:** Can you change the parameter choices such that $s_0^{\ast}(a_{-1})$ is monotonically decreasing in $a_{-1}$?
# Refined grid search
Let $\boldsymbol{x} = \left[\begin{array}{c}
x_1 \\
x_2\\
\end{array}\right]$ be a two-dimensional vector.
Consider the following algorithm:
**Algorithm:** `grid_search()`
**Goal:** Minimize the function $f(\boldsymbol{x})$.
1. Choose a grid size $N$ and minimum and maximum values of $x_1$ and $x_2$ denoted $\overline{x}_1 > \underline{x}_1$ and $\overline{x}_2 > \underline{x}_2$
2. Calculate step-sizes
$$
\begin{aligned}
\Delta_1 &= (\overline{x}_1 - \underline{x}_1)/(N-1)\\
\Delta_2 &= (\overline{x}_2 - \underline{x}_2)/(N-1)
\end{aligned}
$$
3. Find the grid point with the lowest function value by solving
$$
j_1^{\ast},j_2^{\ast} = \arg \min_{j_1\in\{0,\dots,N-1\},\,j_2\in\{0,\dots,N-1\}} f(\underline{x}_1 + j_1\Delta_1, \underline{x}_2 + j_2\Delta_2)
$$
4. Return $x_1^{\ast} = \underline{x}_1 + j_1^{\ast}\Delta_1$, $x_2^{\ast} = \underline{x}_2 + j_2^{\ast}\Delta_2$ and $f^{\ast} = f(x_1^{\ast},x_2^{\ast})$.
**Question 1:** Implement the `grid_search()` algorithm to minimize the rosen function.
> **Hint:** The global minimum of the rosen function is $0$ at $(1,1)$.
```
def rosen(x):
return (1.0-x[0])**2+2*(x[1]-x[0]**2)**2
def grid_search(f,x1_min,x1_max,x2_min,x2_max,N):
return np.nan,np.nan,np.nan
# settings
x1_min = 0
x1_max = 5
x2_min = 0
x2_max = 4
N = 100
# apply grid search
x1,x2,f = grid_search(rosen,x1_min,x1_max,x2_min,x2_max,N)
print(f'minimum found at ({x1:.8f},{x2:.8f}) with the function value {f:.8f}')
```
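A hedged sketch of one possible `grid_search()` implementation that follows the algorithm above step by step (a vectorized grid evaluation would work just as well):
```
def grid_search(f, x1_min, x1_max, x2_min, x2_max, N):
    # step 2: step-sizes
    delta1 = (x1_max - x1_min)/(N-1)
    delta2 = (x2_max - x2_min)/(N-1)
    # step 3: evaluate f on every grid point and keep the best one
    f_best = np.inf
    x1_best = x2_best = np.nan
    for j1 in range(N):
        for j2 in range(N):
            x1 = x1_min + j1*delta1
            x2 = x2_min + j2*delta2
            f_val = f([x1, x2])
            if f_val < f_best:
                f_best, x1_best, x2_best = f_val, x1, x2
    # step 4: return the best grid point and its function value
    return x1_best, x2_best, f_best
```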
Also, consider the following algorithm:
**Algorithm:** `refined_grid_search()`
**Goal:** Minimize the function $f(\boldsymbol{x})$.
1. Choose a grid size $N$ and minimum and maximum values of $x_1$ and $x_2$ denoted $\overline{x}_1 > \underline{x}_1$ and $\overline{x}_2 > \underline{x}_2$, and a refinement-level $K$
2. Set $k=0$
3. If $k > 0$: Update the minimum and maximum values by
$$
\begin{aligned}
\tilde{\Delta}_1 &= 3(\overline{x}_1 - \underline{x}_1)/(N-1)\\
\tilde{\Delta}_2 &= 3(\overline{x}_2 - \underline{x}_2)/(N-1)\\
\underline{x}_1 &= \max(\underline{x}_1,x_1^{\ast}-\tilde{\Delta}_1)\\
\underline{x}_2 &= \max(\underline{x}_2,x_2^{\ast}-\tilde{\Delta}_2)\\
\overline{x}_1 &= \min(\overline{x}_1,x_1^{\ast}+\tilde{\Delta}_1)\\
\overline{x}_2 &= \min(\overline{x}_2,x_2^{\ast}+\tilde{\Delta}_2)
\end{aligned}
$$
4. Apply the `grid_search()` algorithm returning $x_1^{\ast}$, $x_2^{\ast}$ and $f^\ast$
5. Increment $k$ by one
6. If $k < K$ return to step 3 else continue
7. Return $x_1^{\ast}$, $x_2^{\ast}$ and $f^{\ast}$
**Question 2:** Implement the `refined_grid_search()` algorithm to minimize the rosen function.
```
def refined_grid_search(f,x1_min,x1_max,x2_min,x2_max,N,K):
return np.nan,np.nan,np.nan
# more settings
K = 10
# apply refined grid search
x1,x2,f = refined_grid_search(rosen,x1_min,x1_max,x2_min,x2_max,N,K)
print(f'minimum found at ({x1:.8f},{x2:.8f}) with the function value {f:.8f}')
```
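And a corresponding sketch of `refined_grid_search()`, which repeatedly shrinks the search window around the current best point and re-runs the `grid_search()` sketch above:
```
def refined_grid_search(f, x1_min, x1_max, x2_min, x2_max, N, K):
    for k in range(K):
        if k > 0:
            # step 3: shrink the window around the current best point
            delta1_t = 3*(x1_max - x1_min)/(N-1)
            delta2_t = 3*(x2_max - x2_min)/(N-1)
            x1_min, x1_max = max(x1_min, x1-delta1_t), min(x1_max, x1+delta1_t)
            x2_min, x2_max = max(x2_min, x2-delta2_t), min(x2_max, x2+delta2_t)
        # step 4: grid search on the (possibly shrunken) window
        x1, x2, f_best = grid_search(f, x1_min, x1_max, x2_min, x2_max, N)
    return x1, x2, f_best
```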
```
## CE 295 - Energy Systems and Control
# HW 2 : State Estimation in Geothermal Heat Pump Drilling
# Oski Bear, SID 18681868
# Prof. Moura
# Due Date is written here
# BEAR_OSKI_HW2.ipynb
from __future__ import division  # Python 2 compatibility; a no-op on Python 3
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from numpy import interp  # scipy.interp has been removed; numpy.interp is a drop-in replacement
from scipy.integrate import odeint
from scipy import signal
%matplotlib inline
import control # Read http://python-control.sourceforge.net/manual/
fs = 15 # Font Size for plots
# Drill String Parameters
J_T = 100 # Table/top rotational inertia
J_B = 25 # Bottom/bit rotational inertia
k = 2 # Spring constant
b = 5 # Drag coefficient
```
Problem 1:
- A: Define & write the modeling objective. What are the controllable and uncontrollable inputs? What are the measured and performance outputs? List the parameters.
- Objective: The modeling objective is to estimate the drill bit angular velocity $\omega_B$ from the measured table velocity
- State Variables, `x`:
    - $\omega_T$, table/top angular velocity
    - $\omega_B$, bit/bottom angular velocity
    - $\theta_T$, table/top angle
    - $\theta_B$, bit/bottom angle
- Controllable Inputs, `u`:
    - $\tau$, table torque
- Uncontrollable Inputs, $w$:
    - $\tau_f$, friction torque acting on the bit
- Measured Outputs, `y`:
    - $\omega_T$, table/top angular velocity
- Performance Outputs, `z`:
    - $\omega_B$, bit angular velocity
- Parameters, $\theta$:
    - $b$, drag coefficient
    - $k$, spring constant
    - $J_T$, table/top rotational inertia
    - $J_B$, bottom/bit rotational inertia
- B: Use Newton’s second law in rotational coordinates to derive the equations of motion for the top/table and bottom/bit portions of the drill string. HINT: Fig 2 is a free body diagram.
- $\frac{d\omega_T}{dt} = \frac{1}{J_T}\left(\tau(t) - b\omega_{T}(t) - k [\theta_{T}(t)- \theta_{B}(t)]\right)$
- $\frac{d\omega_B}{dt} = \frac{1}{J_B}\left(-\tau_f(t) - b\omega_{B}(t) + k [\theta_{T}(t)- \theta_{B}(t)]\right)$
- $\frac{d\theta_T}{dt} = \omega_T$
- $\frac{d\theta_B}{dt} = \omega_B$
- C: Write all the dynamical equations into matrix state space form. What are the A, B, C matrices? Hint: A ∈ R4×4
$$
\frac{d}{dt}
\begin{bmatrix} \omega_T \\ \omega_B \\ \theta_T \\ \theta_B\end{bmatrix}
=
\begin{bmatrix} \frac{-b}{J_T} & 0 & \frac{-k}{J_T} & \frac{k}{J_T} \\ 0 & \frac{-b}{J_B} & \frac{k}{J_B} & \frac{-k}{J_B} \\ 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \end{bmatrix}
\begin{bmatrix} \omega_T \\ \omega_B \\ \theta_T \\ \theta_B \end{bmatrix}
+
\begin{bmatrix} \frac{1}{J_T} & 0\\ 0 & \frac{-1}{J_B} \\ 0 & 0 \\ 0 & 0 \end{bmatrix}
\begin{bmatrix} \tau \\ \tau_f \end{bmatrix}
$$
The measured output is $y = \omega_T$, so $C = \begin{bmatrix}1 & 0 & 0 & 0\end{bmatrix}$
Problem 2:
A:
- $O = \begin{bmatrix} C \\
CA \\
CA^{2}\\
CA^{3}
\end{bmatrix}$
- Because rank$(O) = 3 < n = 4$ states, not all states are observable.
B:
$\frac{d}{dt}
\begin{bmatrix} \omega_T \\ \omega_B \\ \theta\end{bmatrix}
=
\begin{bmatrix} \frac{-b}{J_T} & 0 & \frac{-k}{J_T} \\
0 & \frac{-b}{J_B} & \frac{k}{J_B} \\
1 & -1 & 0
\end{bmatrix}
\begin{bmatrix} \omega_T \\
\omega_B \\
\theta
\end{bmatrix}
+
\begin{bmatrix} \frac{1}{J_T} & 0 \\
0 & \frac{-1}{J_B} \\
0 & 0
\end{bmatrix}
\begin{bmatrix} \tau \\
\tau_f
\end{bmatrix}
$
- The measured output is still $y = \omega_T$, so $C$ is now a $1\times 3$ row vector selecting the table velocity from the reduced state.
C:
- $O = \begin{bmatrix} C \\
CA \\
CA^{2}\\
\end{bmatrix}$
- Now rank$(O) = 3 = n$, so the reduced system is observable.
```
## Problem 2 - Observability Analysis
# State space matrices
A4 = np.matrix([[-b/J_T,0,-k/J_T,k/J_T],
[0,-b/J_B,k/J_B,-k/J_B],
[1,0,0,0],
[0,1,0,0]])
B4 = np.matrix([[1/J_T,0],
[0,-1/J_B],
[0,0],
[0,0]])
C4 = np.matrix([[1,0,0,0]])
# Compute observability Matrix for 4-state system and rank
O4 = control.obsv(A4,C4)
print('Rank of Observability Matrix for four-state system')
print(np.linalg.matrix_rank(O4))
# New A Matrix, for 3-state system
A = np.matrix([[-b/J_T,0,-k/J_T],
[0,-b/J_B,k/J_B],
[1,-1,0]
])
B = np.matrix([[1/J_T],
[0],
[0]
])
C = np.matrix([[0,1,0]])
D = np.matrix([0]) #Add empty D for Q4
# Observability Matrix for 3-state system and rank
O = control.obsv(A,C)
print('Rank of Observability Matrix for three-state system')
print(np.linalg.matrix_rank(O))
## Load Data
data=np.asarray(pd.read_csv("HW2_Data.csv",header=None))
t = data[:,0] # t : time vector [sec]
y_m = data[:,1] # y_m : measured table velocity [radians/sec]
Torq = data[:,2] # Torq: table torque [N-m]
omega_B_true = data[:,3] # \omega_B : true rotational speed of bit [radians/sec]
# Plot Data
plt.figure(num=1, figsize=(8, 9), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(2,1,1)
plt.plot(t, Torq)
plt.ylabel('Torque [N-m]')
plt.xlabel('Time [sec]')
plt.title('Torque vs Time')
# Plot table torque
plt.subplot(2,1,2)
plt.plot(t, y_m, color='g')
plt.ylabel('Velocity [rads/sec]')
plt.xlabel('Time [sec]')
plt.title('Measured Table Velocity vs Time')
plt.tight_layout()
# Plot measured table velocity
plt.show()
```
Problem 4:
A: Re(Eigenvalues of A): -0.08322949,-0.08338525,-0.08338525
$$
\begin{aligned}
\dot{\hat{x}}(t) &= A\hat{x}(t) + Bu(t) + L[y(t) - \hat{y}(t)] \\
\hat{y}(t) &= C\hat{x}(t) + Du(t)
\end{aligned}
$$
Simplify $\dot{\hat{x}}$:
1. Distribute $L$ and substitute in the full form of $\hat{y}(t)$:
$$
\dot{\hat{x}} = A\hat{x}(t) + Bu(t) + Ly(t) - L\hat{y}(t) = A\hat{x}(t) + Bu(t) + Ly(t) - L[C\hat{x}(t) + Du(t)]
$$
2. Distribute $L$ again and collect terms:
$$
\dot{\hat{x}} = (A - LC)\hat{x}(t) + (B - LD)u(t) + Ly(t)
$$
B:
- Re(Eigenvalues of A): [-0.08322949,-0.08338525,-0.08338525]
- Re(Selected Eigenvalues): $\lambda_i = $ [-0.4993769, -0.50031153,-0.50031153]
- obtained by scaling the open-loop eigenvalues by 6, in line with the "general rule of thumb" that observer poles should be roughly 2-12x faster than the slowest plant pole. If made too fast, they will amplify measurement noise.
C:
- using the equations derived in 4.A:
$$
\begin{bmatrix} \dot{\hat{x}} \end{bmatrix}
= \begin{bmatrix} A-LC \end{bmatrix} \begin{bmatrix} \hat{x} \end{bmatrix} +
\begin{bmatrix}B-LD,L\end{bmatrix}\begin{bmatrix}u \\ y \end{bmatrix}
$$
$$
\begin{bmatrix} \hat{y} \end{bmatrix} = \begin{bmatrix} C \end{bmatrix}\begin{bmatrix} \hat{x} \end{bmatrix} + \begin{bmatrix} D \end{bmatrix} \begin{bmatrix} u \\ y \end{bmatrix}
$$
D: see plot below
```
## Problem 4 - Luenberger Observer
# Eigenvalues of open-loop system
print('Eigenvalues of open-loop system:')
lam_A,evec = np.linalg.eig(A)
print(lam_A)
# Desired poles of estimation error system
# They should have negative real parts
# Complex conjugate pairs
lam_luen = lam_A * 6
# Compute observer gain (See Remark 3.1 in Notes. Use "acker" command)
L = control.acker(A.T,C.T,lam_luen).T
# State-space Matrices for Luenberger Observer
A_lobs = (A - L*C)
B_lobs = np.hstack((B - L*D, L))  # observer inputs are [commanded torque, measured table velocity]
C_lobs = C
D_lobs = np.matrix([[0,0]])
sys_lobs = signal.lti(A_lobs,B_lobs,C_lobs,D_lobs)
# Inputs to observer
u = np.array([Torq, y_m]).T
# Initial Conditions
x_hat0 = [0,0,0]
# Simulate Response
tsim, y, x_hat = signal.lsim(sys_lobs, U=u, T=t, X0=x_hat0)
# Parse states
theta_hat = x_hat[:,2]
omega_T_hat = x_hat[:,0]
omega_B_hat = x_hat[:,1]
#Add RMS
luen_est_err = omega_B_true-omega_B_hat
RMSE = np.sqrt(np.mean(np.power(omega_B_true-omega_B_hat,2)))
print('Luenberger RMSE: ' + str(RMSE) + ' rad/s')
# Plot Results
plt.figure(num=1, figsize=(8, 9), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(2,1,1)
# Plot true and estimated bit velocity
plt.plot(t,omega_B_true, 'C0',label='Bit Velocity')
plt.plot(t,omega_B_hat, 'C1', label='Est. Bit Velocity')
plt.xlabel('Time [sec]')
plt.ylabel('Velocity [rad/sec]')
plt.title('True vs Estimated Bit Velocity (Luenberger Observer)')
plt.legend()
plt.subplot(2,1,2)
# Plot error between true and estimated bit velocity
plt.plot(t,luen_est_err, 'C2')
plt.xlabel('Time [sec]')
plt.ylabel('Velocity [rad/sec]')
plt.title('Bit Velocity Estimation Error (Luenberger Observer)')
plt.show()
```
Problem 5:
- A: See Ch2.4.45-48
- B: Using the identity matrix as a starting point, I simply tuned by testing different values till I found the lowest RMSE, while checking against the graphs to spot check for sanity. I settled on 0.005 as a reasonable value.
- C: See plots below
- D: `Re(Selected Luenberger Eigenvalues): λi= -0.4993769, -0.50031153, -0.50031153` vs `Re(KF Eigenvalues): λi= -0.49937695, -0.50031153, -0.50031153`. They are nearly identical, which makes sense: if the estimation-error system is asymptotically stable, the Kalman filter gain converges to a steady state with enough time, and here its eigenvalues essentially coincide with the Luenberger design.
```
## Problem 5 - Kalman Filter
# Noise Covariances
W = .0005 * np.identity(3)# Should be 3x3 because of the # of x states
N = .02
Sig0 = np.identity(3)
# Initial Condition
x_hat0 = [0,0,0]
states0 = np.r_[x_hat0, np.squeeze(np.asarray(Sig0.reshape(9,1)))]
# Ordinary Differential Equation for Kalman Filter
def ode_kf(z,it):
# Parse States
x_hat = np.matrix(z[:3]).T
Sig = np.matrix((z[3:]).reshape(3,3))
# Interpolate input signal data
iTorq = interp(it, t, Torq)
iy_m = interp(it, t, y_m)
# Compute Kalman Gain
L = Sig * C.T * (1/N)
# Kalman Filter
x_hat_dot = A * x_hat + B * iTorq + L * (iy_m - (C * x_hat))
# Riccati Equation
Sig_dot = Sig * A.T + A * Sig + W - Sig * C.T * (1/N) * C * Sig
# Concatenate LHS
z_dot = np.r_[x_hat_dot, Sig_dot.reshape(9,1)]
return(np.squeeze(np.asarray(z_dot)))
# Integrate Kalman Filter ODEs
z = odeint(ode_kf, states0, t)
# Parse States
theta_hat = z[:,2]
omega_T_hat = z[:,0]
omega_B_hat = z[:,1]
Sig33 = z[:,11] # Parse out the (3,3) element of Sigma only!
omega_B_tilde = omega_B_true - omega_B_hat
omega_B_hat_upperbound = omega_B_hat + np.sqrt(Sig33)
omega_B_hat_lowerbound = omega_B_hat - np.sqrt(Sig33)
RMSE = np.sqrt(np.mean(np.power(omega_B_tilde,2)))
print('Kalman Filter RMSE: ' + str(RMSE) + ' rad/s')
# Plot Results
plt.figure(num=3, figsize=(8, 9), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(2,1,1)
# Plot true and estimated bit velocity
plt.plot(t,omega_B_true,'C0', label='True Bit Velocity')
plt.plot(t,omega_B_hat, 'C1', label='Est. Bit Velocity')
plt.plot(t,omega_B_hat_upperbound, 'C3--', label='Upper STD bound')
plt.plot(t,omega_B_hat_lowerbound, 'C3--', label='Lower STD bound')
plt.title('True vs Estimated Bit Velocity')
plt.xlabel('Time [sec]')
plt.ylabel('Bit Velocity [rads/sec]')
plt.legend()
# Plot estimated bit velocity plus/minus one sigma
plt.subplot(2,1,2)
# Plot error between true and estimated bit velocity
plt.plot(t,omega_B_tilde, 'C2')
plt.title('True vs Estimated Error (Kalman Filter)')
plt.xlabel('Time [sec]')
plt.ylabel('Bit Velocity [rads/sec]')
plt.show()
```
Problem 6:
A: Use the original 3-equation ODE system, but replace the $k\theta$ term to reflect the nonlinear equations.
- $\frac{d\theta}{dt} = \omega_{T} - \omega_{B}$
- $\frac{d\omega_T}{dt} = \frac{1}{J_T}\left(\tau(t) - b\omega_{T}(t) - [k_{1}\theta(t) + k_{2}\theta^{3}(t)]\right)$
- $\frac{d\omega_B}{dt} = \frac{1}{J_B}\left(-\tau_f(t) - b\omega_{B}(t) + [k_{1}\theta(t) + k_{2}\theta^{3}(t)]\right)$
Create F(t) and H(t) matrices:
- $F(t)= \begin{bmatrix}
0 & 1 & -1 \\
-\frac{k_{1} + 3k_{2}\theta^{2}}{J_T} & \frac{-b}{J_T} & 0 \\
\frac{k_{1} + 3k_{2}\theta^{2}}{J_B} & 0 & \frac{-b}{J_B}\\
\end{bmatrix}$
- $H(t)= \begin{bmatrix} 0 & 1 & 0 \end{bmatrix}$
```
## Problem 6 - Extended Kalman Filter
# New nonlinear spring parameters
k1 = 2
k2 = 0.25
# Noise Covariances
W = 0.005 * np.identity(3)#You design this one.
N = 0.02
Sig0 = np.identity(3)
# Initial Condition
x_hat0 = [0,0,0]
states0 = np.r_[x_hat0, np.squeeze(np.asarray(Sig0.reshape(9,1)))]
# Ordinary Differential Equation for Kalman Filter
def ode_ekf(z,it):
# Parse States
theta_hat = z[0]
omega_T_hat = z[1]
omega_B_hat = z[2]
Sig = np.matrix((z[3:]).reshape(3,3))
# Interpolate input signal data
iTorq = interp(it, t, Torq)
iy_m = interp(it, t, y_m)
# Compute Jacobians
F = np.matrix([[0,1,-1],
[(-k1/J_T)-(3*k2/J_T)*theta_hat**2,-b/J_T,0],
[(k1/J_B)+(3*k2/J_B)*theta_hat**2,0,-b/J_B]
])
H = np.matrix([[0,1,0]])
# Compute Kalman Gain
L = (Sig * H.T* (1/N))
# Compute EKF system matrices
y_hat = omega_T_hat
theta_hat_dot = (omega_T_hat
- omega_B_hat
+ L[0] * (iy_m-y_hat))
omega_T_hat_dot =((iTorq/J_T)
- (b*omega_T_hat/J_T)
- (k1*theta_hat+k2*theta_hat**3)/J_T
+ L[1] * (iy_m-y_hat))
omega_B_hat_dot =(-(b*omega_B_hat/J_B)
+ (k1*theta_hat+k2*theta_hat**3)/J_B
+ L[2] * (iy_m-y_hat))
# Riccati Equation
Sig_dot = ((Sig*F.T) + (F*Sig) + W - Sig * H.T * (1/N) * H * Sig)
# Concatenate LHS
z_dot = np.r_[theta_hat_dot, omega_T_hat_dot, omega_B_hat_dot, Sig_dot.reshape(9,1)]
return(np.squeeze(np.asarray(z_dot)))
# Integrate Extended Kalman Filter ODEs
z = odeint(ode_ekf, states0, t)
# Parse States
theta_hat = z[:,0]
omega_T_hat = z[:,1]
omega_B_hat = z[:,2]
Sig33 = z[:,-1]
omega_B_tilde = omega_B_true - omega_B_hat
omega_B_hat_upperbound = omega_B_hat + np.sqrt(Sig33)
omega_B_hat_lowerbound = omega_B_hat - np.sqrt(Sig33)
RMSE = np.sqrt(np.mean(np.power(omega_B_tilde,2)))
print('Extended Kalman Filter RMSE: ' + str(RMSE) + ' rad/s')
# Plot Results
plt.figure(num=3, figsize=(8, 9), dpi=80, facecolor='w', edgecolor='k')
plt.subplot(2,1,1)
# Plot true and estimated bit velocity
plt.plot(t,omega_B_true, 'C0', label='True Bit Velocity')
plt.plot(t,omega_B_hat, 'C1', label='Est. Bit Velocity')
# Plot estimated bit velocity plus/minus one sigma
plt.plot(t,omega_B_hat_upperbound, 'C3--', label='Upper STD Bound')
plt.plot(t,omega_B_hat_lowerbound, 'C3--', label='Lower STD Bound')
plt.title('True vs Estimated Bit Velocity (EKF)')
plt.xlabel('Time [sec]')
plt.ylabel('Bit Velocity [rads/sec]')
plt.legend()
plt.subplot(2,1,2)
# Plot error between true and estimated bit velocity
plt.plot(t, omega_B_tilde,'C2')
plt.title('True vs Estimated Error (EKF)')
plt.xlabel('Time [sec]')
plt.ylabel('Bit Velocity [rads/sec]')
plt.show()
```
# Section IV. DYNAMICS AND CONTROL
# Chapter 16. Control of Articulated Robots
Articulated robot control is a well-understood field, and industrial
robots can attain impressive levels of precision even along complex
trajectories. However, for optimal performance it is important for the
controller designer and the trajectory designer (or trajectory planner)
to cooperate or at least be cognizant about the limitations of each
others' components.
As an example, consider an industrial robot performing a semi-repetitive
task like welding, where improving productivity means reducing the
duration of every cycle. The robot cannot instantaneously accelerate and
decelerate due to inertia, so if the desired trajectory slows down
faster than the robot's motors can brake, then the robot will overshoot
the desired target. Also, if the robot is operating in tight quarters,
the amount of clearance needed between the robot and obstacles is
typically proportional to its speed. The applications engineer must
either closely coordinate with the controls engineer to design the
trajectory according to the controller's specifications --- or else face
a tedious process of trial-and-error.
In many respects this chapter is quite vague on the details about *how
to actually implement* a beautifully-performing articulated robot
controller. This is by necessity; almost every robot that you purchase
or build is going to have a different set of control capabilities and
interfaces. An inexpensive hobby robot will not, by and large, have
pinpoint accuracy and convenient trajectory design tools. A hydraulic
industrial robot will not have a light touch. In most situations, as a
robot engineer you often must (if I may twist a quote by Donald
Rumsfeld) go to war with the robot you have, not the robot you wish you
had. And one of the first battles you will face is to "wrap" the
out-of-the-box control functionality of the robot into a useful,
familiar interface that will help the application designer work with
higher-level primitive operations.
Who controls what? The motor controller
---------------------------------------
*************

<div class="figcaption"><a name="fig:MotorControllerDiagram">Figure 1</a>
System diagram of a motor controller.
</div>
*************
Rarely would anyone find a robot whose computer applies voltages or
currents directly to the motors. Rather, between the CPU and the motors
sits a device called the *motor controller* (or more simply, the
controller) that takes digital inputs from the CPU and outputs analog
voltages or currents to the motors. It may also have the responsibility
of converting readings from the motor encoders back into position
readings for the computer.
The controller's electronics or microprocessor perform some simple
transformations of the digital input to derive the voltage or current
signal. In the simplest case, it may just act as a digital-to-analog
converter, in which case it is called a *motor driver*. However, most
controllers perform other operations as well, such as calibration, PID
control, filtering, velocity limiting, temperature regulation, and/or
joint trajectory interpolation. These are important functions to prevent
damage to the motor, such as burnout, and can also be more convenient
for the end user. The end user in this case would be an application
designer or a control engineer, who is interested in higher-level
concerns than how the motor is driven. However, this also adds to
complexity, since the user must understand the controller's functions
and limitations!
In an ideal world, the designers of motor controllers should also
understand and implement the end user's desired functionality of the
motor. Institutions with dedicated mechatronics labs might be able to do
this, but more likely the mechatronic specialists are separate from the
control engineers. As a result the burden likely falls on the user to
understand (and compensate for!) the choices made by the motor
controller designer. It can be frustrating to plan for, say, controlling
a robot with velocity inputs... and then to learn that the controller
only supports Go-To commands.
Therefore, the first step in deciding how to control a robot is to
characterize the inputs and performance characteristics of the
controller. Some of these characteristics include maximum executable
velocity or acceleration, PID gains, frequency response, tracking error,
nominal and peak load, etc. More sophisticated (and expensive)
controllers may also provide fault detection and recovery, motion
queues, and gravity compensation. Another degree of sophistication is
whether the motor controller operates a single joint (requiring multiple
controllers per robot) or multiple joints. The latter are known as
multi-axis controllers, and can perform sophisticated operations
including Cartesian control and synchronized Go-To movements onboard.
This offloads work that would otherwise be performed on the CPU onto the
controller.
As a general rule of thumb, cheap motor controllers require the
application developer to put in more effort to provide high-level
functionality, like trajectory execution or Cartesian movement. But more
expensive controllers require the application developer to spend more
time reverse-engineering the controller's behavior. For example, how
long will it take to go from configuration A to configuration B? The
rest of this chapter will describe how high-level functionality might be
provided --- whether it resides on the CPU or the motor controller.
Trajectory following
--------------------
Articulated robots are an example of a second order dynamic system, and
here we consider only fully-actuated articulated robots in which each
configuration variable has a corresponding control variable. In other
words, $n = 2m$, since the state variable $x=(q,\dot{q})$ is the phase
of the system, and the control $u$ is assumed to affect the rate of
change of $\dot{q}$.
The basic idea of trajectory following is to define a desired joint
space trajectory $q_D(t)$ and to have the robot's actual trajectory
$q(t)$ follow it. Assuming the robot can precisely measure its
configuration and velocity, the general method for doing so is simply to
send the desired configuration $q_D(t)$ and velocity and $\dot{q}_D(t)$
to a lower level controller, such as a PID controller, while updating
the setpoint at a sufficiently high rate. However, to obtain accurate
tracking, the designer of the trajectory must be cognizant of the
control performance of the robot as a second order system, since at the
lowest level the motor controller must eventually convert setpoint
commands into joint torques.
First, discontinuities in $q_D(t)$ should be forbidden since the robot
will accelerate quickly, and possibly overshoot the target. Similarly,
discontinuities in $\dot{q}_D(t)$ should be avoided if possible, because
exact tracking would require instantaneous acceleration. Naively
requesting sudden changes of velocity runs the risk of overheating
motors and overshooting setpoints.
Secondly, joint stops and self-collision should be avoided with some
margin of error, since the controller will not execute the trajectory
exactly, especially at high speed. The magnitude of the margins should
be chosen proportionally to the magnitude of control errors, which
depend on the stiffness of the underlying controller.
There are several functions that a motor controller may provide to
enforce trajectory smoothness and safety. Or, the end user might need
(or prefer) to implement these functions themselves. These functions are
discussed below.
### Motion queues (motion generation)
To improve the convenience for a robot application designer, and to
avoid some of the risks of allowing execution of non-smooth
trajectories, most motor controllers provide a Go-To command, in which
the desired endpoint is reached gradually over time. In essence, the
motor controller is generating the robot's reference trajectory $q_D(t)$
as it goes.
*************

<div class="figcaption"><a name="fig:MotionQueue">Figure 2</a>.
Illustrating a 3-axis motion queue. The robot's future trajectory is
created as requested by the application, and over time the controller
incrementally feeds setpoints to a lower-level PID
controller.
</div>
*************
This is a simple form of a *motion queue* mode of operation in which the
application requests milestones or trajectory segments to be queued up
on the controller (i.e., multiple Go-To commands, or a Move-Path
command). Over time, the controller will execute the requested
trajectory sequence without further input from the application
([Fig. 2](#fig:MotionQueue)). The convenience of this mode of operation
is quite apparent when programming long sequences of instructions with
discrete event logic.
To implement such motion queues, the controller maintains a future
trajectory in its memory. As in typical trajectory following, time is
advanced on each control loop, and the desired setpoint is updated.
Furthermore, the future trajectory can be modified at any time by an
external request signal (e.g., a function call). For example, adding a
Go-To request will append an interpolating curve from the end of the
queued trajectory to the requested target position (a *waypoint*). If
the current queue is empty, then the curve will originate from the
current robot position. This type of asynchronous request is convenient
because the application developer does not need to develop a fixed-rate
control loop. Moreover, motion queues might be directed to cycle, making
it convenient to define repetitive movements used in factory settings.
Motion queue methods differ greatly in their interpolating curves, speed
and velocity control, their ability to handle joint-space vs. Cartesian
space targets, and whether an existing queue may be interrupted. Some of
these differences are discussed below.
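A minimal motion-queue sketch, assuming joint-space Go-To requests and the piecewise linear tracking described in the next subsection (the class and method names are illustrative, and a real controller would call `update()` from its fixed-rate loop):
```
import numpy as np

class MotionQueue:
    """Minimal joint-space motion queue: Go-To requests append waypoints,
    which are tracked at a bounded per-joint velocity (piecewise linear profile)."""
    def __init__(self, q0, dt=0.004, v_max=1.0):
        self.q = np.asarray(q0, dtype=float)   # current setpoint sent to the PID loop
        self.dt = dt                            # control period [s]
        self.v_max = v_max                      # per-joint velocity limit [rad/s]
        self.waypoints = []                     # queued joint-space targets

    def go_to(self, q_target):
        """Asynchronous Go-To request: append a waypoint to the queue."""
        self.waypoints.append(np.asarray(q_target, dtype=float))

    def stop(self):
        """Clear all pending waypoints; the setpoint holds its current value."""
        self.waypoints.clear()

    def update(self):
        """Advance one control period; returns the next setpoint."""
        if not self.waypoints:
            return self.q                       # queue empty: hold position
        target = self.waypoints[0]
        step_limit = self.v_max * self.dt
        self.q = self.q + np.clip(target - self.q, -step_limit, step_limit)
        if np.allclose(self.q, target):
            self.waypoints.pop(0)               # waypoint reached, move on
        return self.q
```
Swapping in a trapezoidal or cubic interpolation profile only changes how `update()` moves the setpoint toward the head of the queue.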
### Interpolation profiles
Given the robot at rest and a target configuration, an *interpolation
profile* is a continuous trajectory that satisfies certain operational
constraints on the robot's joint dynamics. A sequence of Go-To commands
can be implemented by concatenating several interpolation profiles, in
which the robot stops/starts at subsequent waypoint configurations.
#### Piecewise linear
The most basic interpolation profile is a piecewise linear curve.
Additional milestones are added in a manner that limits the maximum
absolute joint velocity
([Fig. 3](#fig:PiecewiseLinearInterpolation)). However, the trajectory
exhibits instantaneous changes of velocity. This type of profile is most
often observed in inexpensive hobbyist controllers.
*************

<div class="figcaption"><a name="fig:PiecewiseLinearInterpolation">Figure 3</a>.
Illustrating the use of piecewise linear interpolation profiles. At
time $t_0$, a commanded waypoint $\theta_g$ is sent, and an
interpolating curve is constructed (left). Then, at time $t_1$, a new
waypoint command $\theta_{g2}$ is sent that interrupts execution of the
existing curve
(right).
</div>
*************
#### Trapezoidal velocity profile
A refinement of this approach is a bounded-acceleration,
bounded-velocity curve. To arrive at a target configuration in a
minimum-time fashion under these bounds, while also starting and
stopping at 0 velocity, a *trapezoidal velocity profile* may be used
([Fig. 4](#fig:TrapezoidalVelocityProfile)). Assume the waypoint joint
value $\theta_{g}$ is greater than the start value $\theta_{s}$ (in the
reverse case, the profile may simply be flipped). The velocity profile
ramps up from 0 at maximum acceleration $a_{max}$ until the maximum
velocity $v_{max}$ is reached, then proceeds at constant velocity for
some time, and then decelerates to 0 velocity at minimum slope. If the
target is close to the original configuration, the maximum velocity may
not actually be reached, in which case the velocity profile takes on a
triangular shape.
*************

<div class="figcaption"><a name="fig:TrapezoidalVelocityProfile">Figure 4</a>.
Trapezoidal velocity profile interpolation. The joint trajectory
switches between parabolic, linear, and parabolic
segments.
</div>
*************
The durations of the 2 or 3 segments of the profile, and the total time $t_f$, are determined
analytically from $\theta_{g}-\theta_{s}$ and the acceleration /
velocity bounds. Specifically, first consider the 2-segment case. The
curve $\theta(t)$ begins with an upwards-facing parabola for duration
$t_f/2$ and ends with a downwards facing parabola with an equal
duration. Hence, the displacement at the midpoint
$(\theta_{g}-\theta_{s})/2$ must be equal to
$\frac{1}{2} a_{max} (t_f/2)^2$. In other words,
$t_f = 2 \sqrt{(\theta_{g}-\theta_{s})/a_{max}}$. If the midpoint
velocity $a_{max} t_f /2 > v_{max}$ , then the velocity bound is reached
and we must use the 3 segment case. In this case, the first segment
accelerates at acceleration $a_{max}$ for time $t_1 = v_{max}/a_{max}$,
and the last segment decelerates for the same amount of time
$t_3 = t_1$. In this time they each cover distance
$\frac{1}{2} a_{max} (v_{max}/a_{max})^2 = v_{max}^2 /(2 a_{max})$, and
hence the second segment must cover distance
$\theta_g - \theta_s - v_{max}^2 /a_{max}$. It does so at speed
$v_{max}$, so it takes time
$t_2 = (\theta_g - \theta_s)/v_{max} - v_{max}/a_{max}$. Ultimately, the
overall time is
$t_f = 2t_1 + t_2 = (\theta_g - \theta_s)/v_{max} + v_{max}/a_{max}$.
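The timing logic above translates directly into code. A sketch, under the stated rest-to-rest assumption (the function name is illustrative):
```
import numpy as np

def trapezoidal_profile(theta_s, theta_g, v_max, a_max):
    """Return (t_f, theta(t)) for a rest-to-rest trapezoidal velocity profile."""
    sign = 1.0 if theta_g >= theta_s else -1.0
    d = abs(theta_g - theta_s)
    if np.sqrt(a_max * d) <= v_max:      # triangular case: v_max is never reached
        t1 = np.sqrt(d / a_max)
        t2 = 0.0
    else:                                # trapezoidal case
        t1 = v_max / a_max
        t2 = d / v_max - v_max / a_max
    t_f = 2.0 * t1 + t2

    def theta(t):
        t = float(np.clip(t, 0.0, t_f))
        if t < t1:                       # constant acceleration
            s = 0.5 * a_max * t**2
        elif t < t1 + t2:                # constant velocity v_max
            s = 0.5 * a_max * t1**2 + v_max * (t - t1)
        else:                            # constant deceleration
            td = t_f - t
            s = d - 0.5 * a_max * td**2
        return theta_s + sign * s
    return t_f, theta

# example: move 1 rad with v_max = 1 rad/s and a_max = 2 rad/s^2
t_f, theta = trapezoidal_profile(0.0, 1.0, v_max=1.0, a_max=2.0)
print(t_f, theta(0.0), theta(t_f / 2), theta(t_f))
```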
##### Bounded-jerk interpolation
A second potential extension, which provides even smoother curves, is a
bounded-jerk trajectory. *Jerk* is another name for the third time
derivative of a function, and bounded-jerk paths will see the
acceleration profile having a positive trapezoid and negative
trapezoidal components. Determination of the number of segments, segment
timing, and overall duration is similar to the above construction,
although higher-order polynomials need to be solved.
##### General methods and cubic curves
Other potential interpolation profiles include cubic curves (Hermite
interpolation) and sinusoidal curves (cosine smoothing). In each case,
coefficients of the profile and the interpolation duration are
determined by matching the boundary conditions and any operational
constraints on the velocity or acceleration of the curve.
For example, a cubic curve $\theta(u)$ interpolating between $\theta_s$
and $\theta_g$ over the range $u\in [0,1]$ has the form
$$\theta(u) = \theta_s + (\theta_g-\theta_s)(a + bu + cu^2 + du^3)$$
Using the boundary conditions $\theta(0) = \theta_s$,
$\theta(1) = \theta_g$, $\theta'(0) = 0$, and $\theta'(1)=0$, we have
four linear constraints that must be satisfied: $a=0$, $a+b+c+d=1$,
$b = 0$, and $b + 2c + 3d = 0$. The solution is $a=0$, $b=0$, $c=3$,
$d=-2$, and hence
$$\theta(u) = \theta_s + (\theta_g-\theta_s)(3u^2 - 2u^3)$$ is a smooth
interpolator known as the Hermite curve.
The remaining question is how to set the time duration of the curve
$t_f$ to maintain velocity and acceleration limits. We define
$u = t/t_f$, giving the expression for velocity
$$\dot{\theta}(t) = \frac{d}{dt} \theta(u(t)) = \theta^\prime(u(t)) u^\prime(t) = (\theta_g-\theta_s)(6u - 6u^2) /t_f = 6(\theta_g-\theta_s)(t/t_f^2 - t^2/t_f^3)$$
This is maximized at $t=t_f/2$, which gives the maximum velocity along
the curve as a function of $t_f$
$$v^\star(t_f) = \frac{3}{2}(\theta_g-\theta_s)/t_f.$$ The acceleration
is $$\ddot{\theta}(t) = 6(\theta_g-\theta_s)(1/t_f^2 - 2t/t_f^3)$$ which
has maximum absolute value at the endpoints. Hence, the maximum
acceleration along the curve is
$$a^\star(t_f) = 6(\theta_g-\theta_s)/t_f^2.$$ It is then a simple
matter to determine whether the curve is acceleration limited or
velocity limited, and then to set $t_f$ appropriately.
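A short sketch of this bookkeeping for the Hermite interpolator, choosing $t_f$ as the smallest duration that respects both bounds (names illustrative):
```
import numpy as np

def hermite_duration(theta_s, theta_g, v_max, a_max):
    """Smallest t_f such that the rest-to-rest Hermite curve respects v_max and a_max."""
    d = abs(theta_g - theta_s)
    t_v = 1.5 * d / v_max            # from v* = (3/2) d / t_f   <= v_max
    t_a = np.sqrt(6.0 * d / a_max)   # from a* = 6 d / t_f^2     <= a_max
    return max(t_v, t_a)

def hermite(theta_s, theta_g, t_f):
    """Return theta(t) for the cubic (Hermite) interpolation profile."""
    def theta(t):
        u = np.clip(t / t_f, 0.0, 1.0)
        return theta_s + (theta_g - theta_s) * (3*u**2 - 2*u**3)
    return theta

t_f = hermite_duration(0.0, 1.0, v_max=1.0, a_max=2.0)
theta = hermite(0.0, 1.0, t_f)
print(t_f, theta(0.0), theta(t_f/2), theta(t_f))
```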
### Speed and velocity control
Speed control is a fairly common addition for Go-To commands, and simply
modulates the rate of progression of "time" along the trajectory.
However, to avoid sudden changes in velocity it is important not to
modify path speed instantaneously while the robot is moving.
Alternatively, speed control may be provided by modulating the maximum
velocity / acceleration used to derive the interpolation profile.
Velocity control is a desirable capability for maximizing robot speed
because the target joint velocities can be chosen so that movement to
the next milestone does not require stopping. Trapezoidal velocity
profiles and cubic curves are the most convenient forms for allowing
waypoint velocities. To do so, the specified velocities should be
incorporated as boundary conditions when solving for the form of the
interpolation profile.
### Cartesian commands
More sophisticated multi-axis motor controllers, such as those found in
industrial robots, may mix joint-space and Cartesian commands
([Fig. 5](#fig:MixedCartesianQueuing)).
*************

<div class="figcaption"><a name="fig:MixedCartesianQueuing">Figure 5</a>.
A motor controller may mix Cartesian and joint-space motion queuing.
Left: the end effector trace of a 2R manipulator with a mixture of joint
space (solid) and Cartesian space (dotted) waypoint commands. Right: the
same trace, but in joint
space.
</div>
*************
Cartesian motion queue
functionality is useful to guide tools along workspace paths, while
joint-space waypoints are superior for moving quickly between
configurations. A motion queue can construct Cartesian interpolation
profiles, with two caveats.
First, IK solutions along the interpolation profiles should be verified
to exist when given a Cartesian Go-To command. If no solution exists,
the command should return an error code.
Secondly, during execution, an IK solver must be used to find the joint
space configuration and velocity for the interpolated Cartesian target
position. These values will be then used as setpoints to the low level
controller. Alternatively, operational space control methods can be used
for direct Cartesian control without the use of an IK solver using only
Jacobian information. Operational space control will be briefly covered
below. In either case, error codes may be invoked during execution when
no IK solution exists, the robot hits joint limits, or passes near
singularities.
### Interruption
Interruption is a helpful feature for motion queues when the robot may
need to react to unexpected conditions. As an example, a stop and reset
may be triggered when the robot mistakenly drops an object during a
transfer task.
To stop movement an unspecified time, it is a simple matter to clear the
remainder of the queue once a robot reaches a stopped waypoint. But if
the robot should be stopped during the middle of movement, some care
must be taken. One option could be to execute a maximal braking
trajectory, which slows down each joint as quickly as possible. The
issue with this approach is that the robot will likely drift from the
desired trajectory because each joint is slowed at a different rate,
which may cause collisions. An alternative is just to slow the robot
down along its current trajectory by reducing the path speed to zero.
Another mode of interruption is to instantaneously start moving to a new
waypoint, clearing the remainder of the queue. This was illustrated for
the simple piecewise linear interpolation profile in
[Fig. 3](#fig:PiecewiseLinearInterpolation). However, to avoid sudden
changes of velocity, the interpolation profile used in the queue should
support nonzero initial velocities.
The most sophisticated mode of interruption would be to allow unlimited
editing of the motion queue at any point in time. Providing such
functionality usually requires a fair amount of bookkeeping on the
controller, and hence is rarely implemented for most industrial robots.
### State machines
For many tasks, the robot controller should perform some notion of
*switching* between discrete modes of operation. For example, to
implement Cartesian commands and braking trajectories, the controller
must activate different functions, accept different input, and report
different status codes depending on which mode it is in. This type of
functionality can be thought of as a *state machine*, and is usually
implemented as one as well.
A state machine prescribes several possible discrete *states*
$S_0,S_1,\ldots,S_N$ under which the controller can operate. (These are
distinct from the robot's state in the context of dynamics, and order
does not matter.) The controller can be in one state
$S(t) \in \{ S_0,S_1,\ldots,S_N \}$ at any given time, starting with the
initial state $S_0$. The controller still operates on a fixed control
loop, and performs whatever processing is designated by state $S(t)$.
This includes input-output processing, but also potentially reacting to
signals which indicate that the state machine should change state. These
signals $T_{ij}$ are known as *transitions*. If at time $t$,
$S(t) = S_i$, and transition $T_{ij}$ becomes active, then we change the
state $S(t+\Delta t)\gets S_j$ for the next time step. (If multiple
transition signals are raised at the same time, the controller
implements some tie-breaking policy.)
*************

<div class="figcaption"><a name="fig:StateMachine">Figure 6</a>.
Left: A state machine is a directed graph that describes the discrete
modes that a controller can switch between. Right: An example state
machine for a controller that detects hardware faults and can switch
between joint space commands, Cartesian commands, and braking
commands.
</div>
*************
State machines are a very useful tool for visualizing the functionality
of controllers from low-level motor controllers to high-level
supervisors, and are typically drawn as directed graphs
([Fig. 6](#fig:StateMachine)). We shall revisit these diagrams later
when we discuss [system integration](SystemIntegration.ipynb).
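In practice such a controller state machine is often just a table of (state, signal) → next-state entries checked once per control loop. A minimal sketch, with state and signal names loosely following the example in [Fig. 6](#fig:StateMachine) (all names are illustrative):
```
class ControllerStateMachine:
    """Minimal state machine: states are strings, transitions map (state, signal) -> state."""
    def __init__(self):
        self.state = 'joint_space_control'            # initial state S0
        self.transitions = {
            ('joint_space_control', 'cartesian_command'): 'cartesian_control',
            ('cartesian_control', 'joint_command'):       'joint_space_control',
            ('joint_space_control', 'stop_requested'):    'braking',
            ('cartesian_control', 'stop_requested'):      'braking',
            ('braking', 'stopped'):                       'joint_space_control',
            ('joint_space_control', 'hardware_fault'):    'fault',
            ('cartesian_control', 'hardware_fault'):      'fault',
            ('braking', 'hardware_fault'):                'fault',
        }

    def step(self, signals):
        """One control loop: apply the first matching transition, then run the state's logic."""
        for sig in signals:
            next_state = self.transitions.get((self.state, sig))
            if next_state is not None:                # simple tie-breaking: first match wins
                self.state = next_state
                break
        # ... per-state processing (setpoint updates, status reporting) would go here ...
        return self.state

sm = ControllerStateMachine()
print(sm.step(['cartesian_command']))   # -> 'cartesian_control'
print(sm.step(['stop_requested']))      # -> 'braking'
print(sm.step(['stopped']))             # -> 'joint_space_control'
```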
Feedforward torque control
--------------------------
Although PID control is sometimes adequate for strong motors or
lightweight robots, it is not sufficient to achieve the level of
trajectory tracking accuracy demanded by industrial applications. There
are three reasons for this.
1. Gravity biases the torques needed to keep the robot upright. A PID
controller compensates for gravity via the integral term, but the
amount of compensation required is highly configuration dependent.
In fact, if a joint axis flips orientation, the torque needed to
resist a given load will need to entirely reverse sign.
2. The effective inertia of the subchain at each joint is highly
dependent on its configuration. For example, if an arm is
outstretched, the shoulder joint will need to apply stiffer torques
to track a trajectory than if it is tucked.
3. At high speed, the Coriolis term becomes significant, causing
biasing torques that must be compensated.
As a result, many industrial robots use model-based techniques to
calculate *feedforward torques* that reflect some of the biases coming
from the robot's dynamics. The simplest form of feedforward torque
computation is *gravity compensation*. In this mode of operation, the
feedforward torques only include the generalized gravity term of the
robot's dynamics equation: $$\tau_{ff} = G(q)$$ where $q$ is the current
sensed configuration. This torque is combined with the feedback torque
$\tau_{fb}$ resulting from the PID controller to obtain the overall
torque: $$\tau = \tau_{fb} + \tau_{ff}.$$ In this form of operation,
when the robot is stationary, the force from gravity will be exactly
compensated for by $\tau_{ff}$, and hence $\tau_{fb}=0$ since the PID
controller will not observe any error. This does, however, assume that
the masses, centers of masses, and link lengths of all robot links are
measured precisely for the calculation of $G$. In practice, these
estimates will have small errors and hence the feedback term is required
for regulating the errors between the true $G(q)$ and the estimate
$\tilde{G}(q)$. Moreover, once the robot begins moving, the PID
controller will need to take on the responsibility of compensating for
inertial and Coriolis effects. See
[Fig. 7](#fig:PIDGravityCompensation) for an illustration.
*************

<div class="figcaption"><a name="fig:PIDGravityCompensation">Figure 7</a>.
A comparison of piecewise linear and piecewise cubic trajectories
executed by PID control or gravity compensation. Left: the two
trajectories. Right: trajectory tracking errors comparing PID control
(PID) against gravity compensation (GC) for the two types of
trajectories. Cubic trajectories demonstrate lower transient error
during movement, while GC converges more quickly toward zero
steady-state error.
</div>
*************
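As a minimal sketch of this control law, the function below combines a joint-space PID feedback torque with a feedforward gravity term. The callable `gravity_estimate`, the gain matrices, and the integrator handling are assumptions for illustration; a real implementation would obtain $\tilde{G}(q)$ from the robot's dynamics model.
```
import numpy as np

def gravity_compensated_control(q, dq, q_des, dq_des, integral, Kp, Ki, Kd, dt, gravity_estimate):
    """One control step: tau = tau_fb (PID) + tau_ff (estimated gravity)."""
    err = q_des - q
    integral = integral + err * dt                       # integral term I(t)
    tau_fb = Kp @ err + Ki @ integral + Kd @ (dq_des - dq)
    tau_ff = gravity_estimate(q)                         # feedforward term G~(q)
    return tau_fb + tau_ff, integral
```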
A more sophisticated controller is *computed torque control*, which also
accounts for inertial and Coriolis effects during movement. Suppose we
have a desired acceleration $\ddot{q}_D$. If we use inverse dynamics to
calculate the torque needed to drive the robot at this
acceleration: $$\tau_{ID}(\ddot{q}) = B(q) \ddot{q} + C(q,\dot q) + G(q)
\label{eq:InverseDynamics}$$ then feeding this torque to the robot will
generate the desired acceleration, assuming no disturbances or modeling
errors. There are generally two methods for compensating for error. The
first method uses $\tau_{ID}$ as a feedforward control, combined with a
PID feedback: $$\tau = \tau_{ID}(\ddot{q}_D) + \tau_{fb}.$$ The second
method, *computed torque control*, instead computes a desired
acceleration as the sum of $\ddot{q}_D$ with a feedback acceleration:
$$\ddot{q}_{fb} = - K_P(q-q_D) - K_I I(t) - K_D(\dot{q}-\dot{q}_D)$$ The
resulting controller uses the torques computed for the feedforward +
feedback accelerations:
$$\tau = \tau_{ID}(\ddot{q}_D + \ddot{q}_{fb}).$$ This approach is
appealing because, with acceleration as the control variable, the PID
constants can be chosen easily so that the joint space error dynamics are
convergent. The main drawback of computed torque control methods is the
additional computational expense of computing inverse dynamics, which
must be recomputed on a per-timestep basis. It also faces the general
difficulty of calibrating accurate inertial parameters of a robot.
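A sketch of the computed torque law is given below, assuming callables `B`, `C`, and `G` that return the (estimated) mass matrix, Coriolis vector, and gravity vector; these names and the integrator handling are illustrative assumptions rather than a particular library's API.
```
import numpy as np

def computed_torque_control(q, dq, q_des, dq_des, ddq_des, integral, Kp, Ki, Kd, dt, B, C, G):
    """tau = tau_ID(ddq_des + ddq_fb), with ddq_fb a PID feedback acceleration."""
    err = q - q_des
    derr = dq - dq_des
    integral = integral + err * dt
    ddq_fb = -Kp @ err - Ki @ integral - Kd @ derr       # feedback acceleration
    tau = B(q) @ (ddq_des + ddq_fb) + C(q, dq) + G(q)    # inverse dynamics of the model
    return tau, integral
```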
Operational space control
-------------------------
It is often desired to control the end effector of an articulated robot
more accurately in Cartesian space than in joint space. *Operational
space control* is a method for doing so without the use of inverse
kinematics. Another application of operational space control is to apply
or respond to forces in Cartesian space, which is a key step in *force
control* and *impedance control*. Properly applying these concepts
requires a deep understanding of how to transform forces and robot
dynamics between joint space and the operational space.
Here, operational space is synonymous with the definition of task space
that we have used throughout this course. Specifically, a task space
$x = \phi(q)$ is defined with the Jacobian
$J(q) = \frac{\partial \phi}{\partial q}(q)$. As usual, the task space
is typically a combination of Cartesian position and/or orientation of
an end effector. (Here we have chosen the notation $\phi$ rather than
the more typical $x = f(q)$ so as to not conflict with the $f(x,u)$
notation used for a dynamic system.)
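For concreteness, the following sketch defines $\phi$ and $J$ for the end effector position of a planar 2R arm with unit link lengths; this particular arm is only an illustrative assumption.
```
import numpy as np

def phi(q):
    """Task space map x = phi(q): end effector position of a planar 2R arm (unit links)."""
    return np.array([np.cos(q[0]) + np.cos(q[0] + q[1]),
                     np.sin(q[0]) + np.sin(q[0] + q[1])])

def jacobian(q):
    """J(q) = d phi / d q for the same arm (a 2x2 matrix)."""
    s1, c1 = np.sin(q[0]), np.cos(q[0])
    s12, c12 = np.sin(q[0] + q[1]), np.cos(q[0] + q[1])
    return np.array([[-s1 - s12, -s12],
                     [ c1 + c12,  c12]])
```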
### Mathematical derivation
To get a sense for this approach, let us suppose that the robot's
dynamics are first-order and velocity controlled. In other words, we
wish to determine a control $u$ such that $\dot{q} = u$. Let $x_D(t)$ be
a task space trajectory that we wish to control, and let $x = \phi(q)$
be the current task space coordinate. We can define a commanded task
velocity $\dot{x}_{cmd} = -K_P (x - x_D(t)) + \dot{x}_D(t)$ via a
feedback position gain $K_P$ and the feedforward velocity
$\dot{x}_D(t)$. To convert from the task velocity to the control, we can
use the Jacobian relationship $\dot{x} = J(q)\dot{q}$. Using the
pseudoinverse, we have $u = \dot{q} = J(q)^+ \dot{x}_{cmd}$. If $x_D$ is
stationary, then given any starting configuration, this will drive the
task coordinate $x$ to converge toward $x_D$.
Note that we could also use the Jacobian transpose rule
$\dot{q} = J(q)^T \dot{x}_{cmd}$ and derive a stable controller in task
space. To see this, observe that the dynamics of the task space error
$e = x - x_D$ are given by $$\dot{e} = J \dot{q} = - J J^T K_P e.$$
Hypothesizing the Lyapunov function $V(e) = \frac{1}{2} e^T K_P e$, we
can calculate
$$\dot{V}(e) = \frac{\partial V}{\partial e}(e)^T \dot{e}.$$ Under the
observation that $\frac{\partial V}{\partial e}(e) = K_P e$, we find
that $\dot{V}(e) = -e^T K_P J J^T K_P e$. If $J$ and $K_P$ are
nonsingular, then $K_P J J^T K_P$ is a symmetric positive definite
matrix, and hence this is a negative definite function, i.e., negative
for all $e\neq 0$. Hence, this system is Lyapunov stable away from
singularities.
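The two velocity-level rules above can be sketched as follows; `phi` and `jacobian` are assumed callables (for example, the 2R definitions given earlier), and `Kp` is the task-space gain matrix.
```
import numpy as np

def task_velocity_control(q, x_des, dx_des, phi, jacobian, Kp, use_transpose=False):
    """Returns the velocity command u = dq for a velocity-controlled robot."""
    dx_cmd = -Kp @ (phi(q) - x_des) + dx_des        # feedback + feedforward task velocity
    J = jacobian(q)
    if use_transpose:
        return J.T @ dx_cmd                         # Jacobian transpose rule
    return np.linalg.pinv(J) @ dx_cmd               # pseudoinverse rule
```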
Typically, operational space control is applied with second-order
dynamics and a torque-controlled robot. This makes it possible to treat
the operational coordinate as a sort of virtual rigid body under
spring-damper dynamics. Suppose we wish to control the dynamics of the
task space coordinate according to the trajectory $x_D(t)$. Then the
desired acceleration is
$$\ddot{x}_{cmd} = -K_P(x-x_D(t)) - K_D(\dot{x} - \dot{x}_D) + \ddot{x}_D(t).
\label{eq:SpringDamperAcceleration}$$ Computing the time derivative of
the task space mapping twice, we obtain:
$$\ddot{x} = \frac{d}{d t}(J(q)\dot{q}) = J(q)\ddot{q} + \dot{J}(q)\dot{q}$$
Let us examine how the task space acceleration is affected by task space
forces, $f_{cmd}$, such that the resulting torque is
$\tau = J^T f_{cmd}$. For convenience of the expression, we drop the
dependence of several terms on $q$: $$\begin{split}
\ddot{x} &= J B^{-1}(\tau - C(q,\dot{q}) - G) + \dot{J}\dot{q} \\
&= JB^{-1} J^T f_{cmd} - JB^{-1} C(q,\dot{q}) - JB^{-1} G + \dot{J}\dot{q}
\end{split}$$
Recall from the [discussion on effective inertia](RobotDynamics.ipynb#Effective-inertia)
that the matrix $\tilde{B} = (JB^{-1}J^T)^{-1}$ is the effective inertia of the task
space coordinate. Multiplying both sides by $\tilde{B}$ and rearranging,
we get
$$\tilde{B}\ddot{x} + \tilde{B}(JB^{-1} C(q,\dot{q}) - \dot{J}\dot{q}) + \tilde{B}JB^{-1}G = f_{cmd}$$
If we let
$\tilde{C}(q,\dot{q}) = \tilde{B}(JB^{-1} C(q,\dot{q}) - \dot{J}\dot{q})$
and $\tilde{G} = \tilde{B}JB^{-1}G$, we have the task space dynamics
expressed in a familiar form:
$$\tilde{B}\ddot{x} + \tilde{C}(q,\dot{q}) + \tilde{G} = f_{cmd}.$$
Thus, with a little linear algebra, we can solve for the force $f_{cmd}$ that
achieves $\ddot{x} = \ddot{x}_{cmd}$. Applying $\tau = J^T f_{cmd}$, we then
obtain the output joint torques $\tau$.
The result of operational space control (with $K_P$, $K_D$ diagonal) is
that the system response to step changes in a Cartesian task space is
linear in Cartesian space. If the same step change were achieved by changing
the configuration setpoint of a joint-space PID controller, the system response would
be nonlinear.
However, there are a number of difficulties in implementing operational
space control. First, it is important not to let the Jacobian become
singular, because the effective inertia will become infinite. Even if it
is near singular, the inversion involved in computing the effective
inertia matrix will cause unacceptably large torques to be computed.
Second, if the configuration has met one or more joint limits, some
directions in the task space will become uncontrollable. In light of
these issues, achieving stable operational space control often requires a
significant amount of tuning.
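Putting the derivation together, a sketch of the resulting controller is shown below. The callables `B`, `C`, `G`, `J`, and `dJ` stand in for a dynamics model and Jacobian routines, and the code assumes $J(q)$ is well away from singularities; it is illustrative rather than a hardened implementation.
```
import numpy as np

def operational_space_torque(q, dq, x, dx, x_des, dx_des, ddx_des, Kp, Kd, B, C, G, J, dJ):
    """Compute joint torques realizing the spring-damper task acceleration ddx_cmd."""
    ddx_cmd = -Kp @ (x - x_des) - Kd @ (dx - dx_des) + ddx_des
    Jq, dJq = J(q), dJ(q, dq)
    Binv = np.linalg.inv(B(q))
    B_task = np.linalg.inv(Jq @ Binv @ Jq.T)                   # effective inertia B~
    C_task = B_task @ (Jq @ Binv @ C(q, dq) - dJq @ dq)        # C~(q, dq)
    G_task = B_task @ Jq @ Binv @ G(q)                         # G~
    f_cmd = B_task @ ddx_cmd + C_task + G_task                 # task-space force
    return Jq.T @ f_cmd                                        # joint torques tau
```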
### Force and impedance control
Using operational space control and a force sensor, it is possible to
obtain a number of interesting behaviors, including Cartesian force
control where the end effector can apply a specified force, or "zero
gravity mode" where the end effector behaves like a free-floating rigid
body. These two behaviors are known, respectively, as *force control*
and *impedance control*.
In both cases, force sensing can be accomplished with a sensing element
(a force/torque sensor) on the end effector, or indirectly via joint
torque sensors. Joint torque sensing is generally less accurate at
estimating the forces applied to the end effector due to loss of
observability at singularities, but does not require external equipment.
It should be noted that force sensors are prone to noise, require
frequent recalibration, and run a risk of irreparable damage from
overloading. Moreover, obtaining stable control loops requires a great
deal of tuning, calibration, and optimization of sensor signal filters,
the force sensor's inertial parameters, the robot's inertial parameters,
and time delays. Hence, implementing force and impedance control
successfully in practice requires a substantial amount of effort from
even experienced control engineers.
Force control is the process of regulating a force applied to the
environment in closed-loop fashion. To illustrate how this might work,
consider a 1-D task space $x = \phi(q)$ denoting the end effector
position along some axis. Assume that the robot is contacting the
environment at the initial configuration $q_0$, and that we would like
to apply a desired force $f_D$ at the end effector. The basic idea of
force control is to sense the external force $f_{ext}$ and, if it is too
high, to reduce $x$, and if it is too low, to increase $x$. Although this
is conceptually simple, the implementation is not!
The main issues to address are that 1) the force sensor does not
directly report $f_{ext}$ but rather a combination of external force,
gravity, inertial forces due to end-effector acceleration, and noise, 2)
it is not clear how much to increase or reduce $x$ to obtain a desired
change in force, and 3) those changes in $x$ need to be converted into
joint-level commands. Operational space control handles issue 3, but not issues 1 or 2.
To extract the external forces from the sensed forces, the operation
of a force sensor should be modeled in detail. Accelerations will be
observed as force spikes in the sensed force signal, and changes in the
end effector orientation will be observed as a bias in the force signal:
$$f_{sensed} = f_{ext} + f_{gravity} + f_{inertial}
\label{eq:ForceSensor}$$ The best way to estimate the external force is
to model the force of gravity and inertia on the sensor by estimating
the sensor's orientation and acceleration and subtracting their sum from
the sensed force. Orientation and acceleration must be estimated from
the robot's encoders or an inertial measurement unit.
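A minimal sketch of this compensation step is shown below. The payload mass, sensor orientation, and acceleration estimate are assumed inputs, and the sign conventions depend on how the sensor is mounted, so treat this as illustrative only.
```
import numpy as np

def estimate_external_force(f_sensed, R_sensor, a_payload, m_payload):
    """Subtract modeled gravity and inertial forces (sensor frame) from the raw reading."""
    g = np.array([0.0, 0.0, -9.81])                      # gravity in the world frame
    f_gravity = R_sensor.T @ (m_payload * g)             # gravity bias seen by the sensor
    f_inertial = R_sensor.T @ (m_payload * a_payload)    # bias from accelerating the payload
    return f_sensed - f_gravity - f_inertial
```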
Next, we need some way to link the force error $f_D - f_{ext}$
to the control signal given to the operational space controller
(usually $\ddot{x}_{cmd}$ or if joint torque control is available, the
commanded end effector force $f_{cmd}$). This can be done using a PID
loop where $f_D - f_{ext}$ is the error signal and the operational space
control signal is the output. In the case that $\ddot{x}_{cmd}$ is the
output, the gain should be on the order of the inverse of the stiffness
of the combined environment and end effector system, while if it is the end
effector force, the gains should be on the order of 1.
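A sketch of such a force loop is given below for the 1-D case, with the state dictionary and gain values as illustrative assumptions; per the discussion above, when the output is $\ddot{x}_{cmd}$ the gains should scale roughly with the inverse of the contact stiffness.
```
def force_control_update(f_des, f_ext, state, Kp, Ki, Kd, dt):
    """One PID step on the force error, producing ddx_cmd for the operational space controller."""
    err = f_des - f_ext
    state['integral'] += err * dt
    derr = (err - state['prev_err']) / dt
    state['prev_err'] = err
    return Kp * err + Ki * state['integral'] + Kd * derr

# Example usage with hypothetical gains
state = {'integral': 0.0, 'prev_err': 0.0}
ddx_cmd = force_control_update(f_des=5.0, f_ext=4.2, state=state, Kp=0.01, Ki=0.02, Kd=0.001, dt=0.01)
```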
Impedance control makes the robot's end effector behave
as though it were a mass-spring-damper (MSD) system subjected to an
external force (usually without gravity). The equations of an MSD are as
follows: $$\ddot{x} = M^{-1} (f_{ext} - K (x-x_D) - D \dot{x})
\label{eq:MassSpringDamper}$$ with $M$ the mass matrix, $K$ the
stiffness matrix, and $D$ the damping matrix. For example, a
zero-gravity mode sets $K$ and $D$ to 0 and the robot's end effector
behaves like a free-floating mass in response to applied forces. To
simulate MSD behavior on a robot's end effector, equation
($\ref{eq:MassSpringDamper}$) can be used in conjunction with
($\ref{eq:ForceSensor}$) to estimate $f_{ext}$, and the resulting
$\ddot{x}$ should be fed into an operational space controller as
$\ddot{x}_{cmd}$.
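The corresponding computation is a one-liner; the matrices below are illustrative, and setting `K` and `D` to zero yields the zero-gravity behavior described above.
```
import numpy as np

def impedance_acceleration(x, dx, x_des, f_ext, M, K, D):
    """Task acceleration ddx_cmd that makes the end effector emulate the MSD dynamics."""
    return np.linalg.solve(M, f_ext - K @ (x - x_des) - D @ dx)
```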
An implementer of such systems should be aware that it is difficult to
simulate a low-mass MSD with a high-mass robot, since the accelerations
encountered by the mass in response to an applied force may exceed the
limits of the robot's torques. The range of accelerations that a robot
can perform is also known as its *control bandwidth*. Lighter robots
with stronger motors and with low mass near the end effector tend
to have larger control bandwidth than heavier ones.
Summary
-------
Key takeaways:
- To maximize performance of an articulated robot, its trajectories
should be designed with motor controller characteristics in mind
(and vice versa).
- A robot's commands from its CPU are processed through a motor
controller board, which may implement wildly differing amounts of
functionality for safety, accuracy, and convenience purposes.
- Motion queues are a common function of motor controllers that create
trajectory interpolation profiles from asynchronous commands.
- Gravity compensation and computed torque control are two forms of
feedforward torque control that improve the accuracy of trajectory
tracking.
- Operational space control redefines the PID control in some
Cartesian task space rather than joint space. This allows relating
torques to forces, as needed for applications like force and
impedance control.
Exercises
---------
1. Suppose that your robot only has an ability to perform velocity
control, in which the CPU specifies the joints' desired velocities.
Commands are read in at a fixed rate (say, 100Hz). Describe how you
would implement a PID controller on the CPU that tracks a trajectory
$q_D(t)$. Keep in mind that velocities are not executed perfectly,
and that the robot should obtain 0 steady-state error if the
trajectory is stationary.
2. Propose a data representation and algorithms for a motion queue
controller such that updating the current setpoint takes $O(1)$
time, and such that an application on the robot's CPU can append to
it and clear it in $O(1)$ time. Your implementation should discard
portions of the queue that are in the past, either with a linked
list or circular buffer. (For simplicity, use piecewise linear
interpolation.)
3. Motor controllers have a limited amount of memory and must determine
desired setpoints at a very fast rate. Why must the motion queue
size, in practice, be limited? If interrupts are allowed, what else
limits the motion queue size?
4. Empirically study the performance of high-gain PID control on a
simulated 6DOF robot. First, tune the constants so that it is
stable. How does it react to gravity when it is first turned on? How
does it react to disturbances? How well does it track when commanded
to a very different configuration?
5. Consider a straight line movement in Cartesian space. In the absence
of disturbances, how would the output of an operational space
controller compare to that of a Cartesian motion queue executed with
a joint space PID controller? How would they differ in rejecting
disturbances?
6. **Implementation.** Implement a Cartesian velocity motion queue
controller for a planar 2R manipulator. As commands, the controller
should take velocities $(v_x,v_y)$ and movement durations $t$, and
simply use piecewise linear interpolation. Develop some sensible
scheme to handle singularities and joint limits by stopping motion.
Test its performance in simulation with a variety of command
sequences.
|
github_jupyter
|
| 0.915536 | 0.949389 |
# MidiTok Full Workflow Example/Tutorial
***
Credit for the GPT2-RGA code used in this colab goes out to @Sashmark97 https://github.com/Sashmark97/midigen and @Damon Gwinn https://github.com/gwinndr/MusicTransformer-Pytorch
***
WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please exercise great humility, care, and respect. https://www.nscai.gov/
***
#### Project Los Angeles
#### Tegridy Code 2021
***
# (Setup Environment)
```
#@title nvidia-smi gpu check
!nvidia-smi
#@title Install all dependencies (run only once per session)
!pip install torch
!pip install tqdm
!pip install matplotlib
!pip install miditok
!wget 'https://raw.githubusercontent.com/asigalov61/tegridy-tools/main/tegridy-tools/GPT2RGA.py'
!wget 'https://github.com/asigalov61/Optimus-VIRTUOSO/raw/main/Samples/Relative-Global-Attention/Optimus-VIRTUOSO-RGA-Edition-Main-Sample.mid'
#@title Import all needed modules
print('Loading needed modules. Please wait...')
import os
from datetime import datetime
import secrets
import copy
import tqdm
from tqdm import auto
from GPT2RGA import *
from miditok import REMI, CPWord, get_midi_programs
from miditoolkit import MidiFile
import matplotlib.pyplot as plt
print('Done!')
print('Tokenizing source MIDI file...')
# Our parameters
pitch_range = range(21, 109)
beat_res = {(0, 4): 8, (4, 12): 4}
nb_velocities = 32
additional_tokens = {'Chord': True, 'Rest': True, 'Tempo': True,
'rest_range': (2, 8), # (half, 8 beats)
'nb_tempos': 32, # nb of tempo bins
'tempo_range': (40, 250), # (min, max)
'Program': False}
# Creates the tokenizer and loads a MIDI
tokenizer = REMI(pitch_range, beat_res, nb_velocities, additional_tokens) # REMI encoding
# tokenizer = CPWord(pitch_range, beat_res, nb_velocities, additional_tokens) # CP encoding
midi = MidiFile('Optimus-VIRTUOSO-RGA-Edition-Main-Sample.mid')
# Converts the MIDI to tokens (the reverse conversion is shown below)
tokens = tokenizer.midi_to_tokens(midi)
print('Done!')
```
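As a quick sanity check of the encoding, the tokens can also be converted back to a MIDI file. This is a minimal sketch assuming the same miditok 0.x API used above (`tokens_to_midi` together with `get_midi_programs`); the output filename is arbitrary.
```
# Optional sanity check: convert the tokens back to a MIDI and write it to disk
converted_back_midi = tokenizer.tokens_to_midi(tokens, get_midi_programs(midi))
converted_back_midi.dump('Optimus-VIRTUOSO-RGA-Edition-Main-Sample-reconstructed.mid')
```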
# (QUICK DEMO)
```
#@title Load processed INTs datasets
number_of_batches = 16 #@param {type:"slider", min:2, max:32, step:2}
n_workers = 6
print('=' * 50)
print('Prepping INTs datasets...')
train_data = []
train_data.extend(tokens[0])
val_dataset = train_data[:int(len(train_data) * 0.5)]
test_dataset = train_data[:int(len(train_data) * 0.5)]
train_list = train_data
val_list = val_dataset
test_list = []
print('=' * 50)
print('Processing INTs datasets...')
train_dataset = EPianoDataset(train_list, max_seq, random_seq)
val_dataset = EPianoDataset(val_list, max_seq)
test_dataset = EPianoDataset(test_list, max_seq)
print('=' * 50)
print('Loading INTs datasets...')
batch_size = number_of_batches
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=n_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=n_workers)
test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=n_workers)
print('=' * 50)
print('Total INTs in the dataset', len(train_data))
print('Total unique INTs in the dataset', len(set(train_data)))
print('Max INT in the dataset', max(train_data))
print('Min INT in the dataset', min(train_data))
print('=' * 50)
print('Checking datasets shapes...')
print('=' * 50)
print('Train loader')
for x, tgt in train_loader:
print(f'X shape: {x.shape}')
print(f'Target shape: {tgt.shape}')
break
print('=' * 50)
print('Validation loader')
for x, tgt in val_loader:
print(f'X shape: {x.shape}')
print(f'Target shape: {tgt.shape}')
break
print('=' * 50)
print('Test loader')
for x, tgt in test_loader:
print(f'X shape: {x.shape}')
print(f'Target shape: {tgt.shape}')
break
print('=' * 50)
print('Done! Enjoy! :)')
print('=' * 50)
```
# (TRAIN)
# Train the model
```
#@title Train
print('MidiTok Model Trainer')
config = GPTConfig(VOCAB_SIZE,
max_seq,
dim_feedforward=dim_feedforward,
n_layer=6,
n_head=8,
n_embd=512,
enable_rpr=True,
er_len=max_seq)
model = GPT(config).to(get_device())
#=====
init_step = 0
lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(d_model, SCHEDULER_WARMUP_STEPS, init_step)
eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
train_loss_func = eval_loss_func
opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
lr_scheduler = LambdaLR(opt, lr_stepper.step)
#===
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
best_acc_file = 'gpt2_rpr_acc.pth'
best_loss_file = 'gpt2_rpr_loss.pth'
loss_train, loss_val, acc_val = [], [], []
for epoch in range(0, epochs):
new_best = False
loss = train(epoch+1, model, train_loader, train_loss_func, opt, lr_scheduler, num_iters=-1)
loss_train.append(loss)
eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, num_iters=-1)
loss_val.append(eval_loss)
acc_val.append(eval_acc)
if(eval_acc > best_eval_acc):
best_eval_acc = eval_acc
best_eval_acc_epoch = epoch+1
torch.save(model.state_dict(), best_acc_file)
new_best = True
if(eval_loss < best_eval_loss):
best_eval_loss = eval_loss
best_eval_loss_epoch = epoch+1
torch.save(model.state_dict(), best_loss_file)
new_best = True
if(new_best):
print("Best eval acc epoch:", best_eval_acc_epoch)
print("Best eval acc:", best_eval_acc)
print("")
print("Best eval loss epoch:", best_eval_loss_epoch)
print("Best eval loss:", best_eval_loss)
#@title Plot resulting training loss graph
tr_loss_list = [item for sublist in loss_train for item in sublist]
plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b')
plt.savefig('MidiTok-Training-Loss.png')
```
# (SAVE/LOAD)
```
#@title Save the model
print('Saving the model...')
full_path_to_model_checkpoint = "MidiTok.pth" #@param {type:"string"}
torch.save(model.state_dict(), full_path_to_model_checkpoint)
print('Done!')
#@title Load/Reload the model
full_path_to_model_checkpoint = "MidiTok.pth" #@param {type:"string"}
print('Loading the model...')
config = GPTConfig(VOCAB_SIZE,
max_seq,
dim_feedforward=dim_feedforward,
n_layer=6,
n_head=8,
n_embd=512,
enable_rpr=True,
er_len=max_seq)
model = GPT(config).to(get_device())
model.load_state_dict(torch.load(full_path_to_model_checkpoint))
print('Done!')
```
# (Generate)
```
# Continuation routine draft code
print('Loading source continuation MIDI...')
# Creates the tokenizer and loads a MIDI
tokenizer1 = REMI(pitch_range, beat_res, nb_velocities, additional_tokens)
midi1 = MidiFile('seed.mid')
# Converts MIDI to tokens, and back to a MIDI
tokens1 = tokenizer1.midi_to_tokens(midi1)
print('Done!')
# Generate from the model
## Seed generator...
### If you get a file error on MIDI save, create a test.mid file manually, then re-run
print('MidiTok Model Generator')
model.eval()
# rand_seq = model.generate(torch.Tensor(tokens1[0][-64:]), target_seq_length=1024) # Continuation example
rand_seq = model.generate(torch.Tensor([1]), target_seq_length=1024)
out = rand_seq[0].cpu().numpy().tolist()
converted_back_midi = tokenizer.tokens_to_midi([out], get_midi_programs(midi))
converted_back_midi.dump('MidiTok-OUTPUT.mid')
print('Done!')
#@title Auto-Regressive Generator
#@markdown NOTE: You must generate a seed composition first or it is not going to start
number_of_cycles_to_run = 5 #@param {type:"slider", min:1, max:50, step:1}
number_of_prime_tokens = 128 #@param {type:"slider", min:64, max:256, step:64}
print('=' * 70)
print('MidiTok Auto-Regressive Model Generator')
print('=' * 70)
print('Starting up...')
print('=' * 70)
print('Prime length:', len(out))
print('Prime tokens:', number_of_prime_tokens)
print('Prime input sequence', out[-8:])
if len(out) != 0:
print('=' * 70)
out_all = []
out_all.append(out)
for i in tqdm(range(number_of_cycles_to_run)):
rand_seq1 = model.generate(torch.Tensor(out[-number_of_prime_tokens:]), target_seq_length=1024)
out1 = rand_seq1[0].cpu().numpy().tolist()
out_all.append(out1[number_of_prime_tokens:])
out = out1[number_of_prime_tokens:]
print(chr(10))
print('=' * 70)
print('Block number:', i+1)
print('Composition length so far:', (i+1) * 1024, 'notes')
print('=' * 70)
print('Done!' * 70)
print('Total blocks:', i+1)
print('Final omposition length:', (i+1) * 1024, 'notes')
print('=' * 70)
OUT = []
for o in out_all:
OUT.extend(o)
converted_back_midi = tokenizer.tokens_to_midi([OUT], get_midi_programs(midi))
converted_back_midi.dump('MidiTok-OUTPUT.mid')
```
# Congrats! You did it! :)
<div>
<img src="..\Week 01\img\R_logo.svg" width="100"/>
</div>
<div style="line-height:600%;">
<font color=#1363E1 face="Britannic" size=10>
<div align=center>Data Types</div>
</font>
</div>
Generally, while programming in any language, you need to use `variables` to store `information`. Variables are nothing but reserved memory locations to store values. **This means that, when you create a variable, you reserve some space in memory**.
You may want to store information of various data types like **character**, **wide character**, **integer**, **floating point**, **double floating point**, **Boolean**, etc. Based on the data type of a variable, the operating system `allocates memory` and decides what can be stored in the reserved memory.
In contrast to other programming languages like C and Java, in R the variables are not declared as some data type. The variables are assigned with R-objects, and the data type of the R-object becomes the data type of the variable. There are many types of `R-objects`. The frequently used ones are:
- Vectors
- Lists
- Matrices
- Arrays
- Factors
- Data Frames
The **simplest** of these objects is the **vector object**, and there are `six data types` of these atomic vectors, also termed the six classes of vectors. The other R-objects are built upon the atomic vectors.
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Six Data Types:</div>
</font>
</div>
<div style="line-height:100%;">
<font color=black face="Britannic" size=5>
<div align=left>Logical</div>
</font>
</div>
`Example:` TRUE, FALSE
```
# Verify:
x <- TRUE
print(class(x))
```
---
<div style="line-height:100%;">
<font color=black face="Britannic" size=5>
<div align=left>Numeric</div>
</font>
</div>
`Example:` 5, 12.2, 999
```
# Verify:
x <- 1.5
print(class(x))
```
---
<div style="line-height:100%;">
<font color=black face="Britannic" size=5>
<div align=left>Integer</div>
</font>
</div>
`Example:` 2L, 0L, 101L
```
# Verify:
x <- 2L
print(class(x))
```
---
<div style="line-height:100%;">
<font color=black face="Britannic" size=5>
<div align=left>Complex</div>
</font>
</div>
`Example:` 2 + 3i
```
# Verify:
x <- 2 + 5i
print(class(x))
```
---
<div style="line-height:100%;">
<font color=black face="Britannic" size=5>
<div align=left>Character</div>
</font>
</div>
`Example:` 'a', "good", "TRUE", '23.5'
```
# Verify:
x <- "1"
print(class(x))
```
---
<div style="line-height:100%;">
<font color=black face="Britannic" size=5>
<div align=left>Raw</div>
</font>
</div>
`Example:` "Hello" is stored as 48 65 6c 6c 6f
```
# Verify:
x <- charToRaw("Hello")
print(x)
print(class(x))
```
In R programming, the most basic data objects are the `R-objects` called `vectors`, which come in the **different classes shown above**. Note that the number of classes in R is not confined to the six types above; for example, we can use many atomic vectors to create an array, whose class will be array.
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Vectors</div>
</font>
</div>
When you want to create a vector with `more than one element`, you should use the `c()` function, which **combines the elements into a vector**.
```
# Create a vector:
info <- c(1, 2, 3L, FALSE)
print(info)
# Get the class of the vector:
print(class(info))
```
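Note that an atomic vector is homogeneous: when you combine values of different classes with `c()`, R coerces them to the most flexible class (logical → integer → numeric → complex → character), which is why the vector above is reported as numeric. A few illustrative checks:
```
# Coercion examples:
print(class(c(TRUE, 2L)))  # "integer"
print(class(c(2L, 3.5)))   # "numeric"
print(class(c(1, "a")))    # "character"
```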
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Lists</div>
</font>
</div>
A **list** is an `R-object` which can contain many different types of elements, like **vectors**, **functions**, and even **another list**.
```
# Create a list:
list1 <- list(info, c(12, 15, 16, 18, 20, 0), FALSE, c("pooya", "maryam"))
# Print the list:
print(list1)
```
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Matrices</div>
</font>
</div>
A matrix is a `two-dimensional` rectangular data set. It can be created using a vector input to the matrix function.
```
# Create a matrix:
v = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
print(v)
print(class(v))
print("*******************************************")
M = matrix(data = v, nrow = 2, ncol = 5, byrow = TRUE)
print(M)
print(class(M))
# Create a matrix:
M = matrix(data = c('a', 'a', 'b', 'c', 'b', 1), nrow = 2, ncol = 3, byrow = TRUE)
print(M)
```
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Arrays</div>
</font>
</div>
While matrices are confined to two dimensions, arrays can have `any number of dimensions`. The array function takes a dim attribute which creates the required number of dimensions. In the example below we create an array of dimension 2×3×3, i.e. three 2×3 matrices.
```
# Create an array:
a <- array(data = 1:18, dim = c(2,3,3))
print(a)
```
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Factors</div>
</font>
</div>
Factors are R-objects which are created using a vector. A factor stores the vector along with the distinct values of its elements as labels. The labels are always characters, irrespective of whether the input vector is numeric, character, Boolean, etc. Factors are useful in statistical modeling.
Factors are created using the factor() function. The nlevels() function gives the count of levels.
```
# Create a vector:
apple_colors <- c('green', 'green', 'yellow', 'red', 'red', 'red', 'green')
print(class(apple_colors))
# Create a factor object:
factor_apple <- factor(apple_colors)
print(class(factor_apple))
# Print the factor.
print(factor_apple)
print(nlevels(factor_apple))
```
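To see the labels themselves and the integer codes a factor stores internally, `levels()` and `as.integer()` can be used (building on the factor created above):
```
# Levels and the underlying integer codes:
print(levels(factor_apple))
print(as.integer(factor_apple))
```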
<div style="line-height:300%;">
<font color=#9A0909 face="Britannic" size=6>
<div align=left>Data Frames</div>
</font>
</div>
`Data frames` are tabular data objects. Unlike a matrix, in a data frame `each column` can contain a `different mode of data`. The first column can be numeric while the second column can be character and the third column can be logical. A data frame is a list of vectors of equal length.
Data Frames are created using the `data.frame()` function.
```
x <- data.frame(name = c("Pooya", "Maryam", "Pedram"),
age = c(36, 35, 32),
stu = c(FALSE, TRUE, FALSE))
print(x)
# Create the data frame.
BMI <- data.frame(gender = c("Male", "Male","Female"),
height = c(152, 171.5, 165),
weight = c(81,93, 78),
Age = c(42,38,26))
print(BMI)
```
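A quick way to confirm that each column keeps its own mode is `str()`, applied here to the data frame created above:
```
# Inspect the structure of the data frame: one type per column
str(BMI)
```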
<center>
<img src="https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
# **SpaceX Falcon 9 first stage Landing Prediction**
# Lab 1: Collecting the data
Estimated time needed: **45** minutes
In this capstone, we will predict if the Falcon 9 first stage will land successfully. SpaceX advertises Falcon 9 rocket launches on its website with a cost of 62 million dollars; other providers cost upward of 165 million dollars each. Much of the savings is because SpaceX can reuse the first stage. Therefore, if we can determine whether the first stage will land, we can determine the cost of a launch. This information can be used if an alternate company wants to bid against SpaceX for a rocket launch. In this lab, you will collect the data from an API and make sure it is in the correct format. The following is an example of a successful landing:

Several examples of an unsuccessful landing are shown here:

Most unsuccessful landings are planned. SpaceX performs a controlled landing in the ocean.
## Objectives
In this lab, you will make a GET request to the SpaceX API. You will also do some basic data wrangling and formatting.
* Request to the SpaceX API
* Clean the requested data
***
## Import Libraries and Define Auxiliary Functions
We will import the following libraries into the lab
```
# Requests allows us to make HTTP requests which we will use to get data from an API
import requests
# Pandas is a software library written for the Python programming language for data manipulation and analysis.
import pandas as pd
# NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays
import numpy as np
# Datetime is a library that allows us to represent dates
import datetime
# Setting this option will print all collumns of a dataframe
pd.set_option('display.max_columns', None)
# Setting this option will print all of the data in a feature
pd.set_option('display.max_colwidth', None)
```
Below we will define a series of helper functions that will help us use the API to extract information using identification numbers in the launch data.
From the <code>rocket</code> column we would like to learn the booster name.
```
# Takes the dataset and uses the rocket column to call the API and append the data to the list
def getBoosterVersion(data):
for x in data['rocket']:
response = requests.get("https://api.spacexdata.com/v4/rockets/"+str(x)).json()
BoosterVersion.append(response['name'])
```
From the <code>launchpad</code> we would like to know the name of the launch site being used, the longitude, and the latitude.
```
# Takes the dataset and uses the launchpad column to call the API and append the data to the list
def getLaunchSite(data):
for x in data['launchpad']:
response = requests.get("https://api.spacexdata.com/v4/launchpads/"+str(x)).json()
Longitude.append(response['longitude'])
Latitude.append(response['latitude'])
LaunchSite.append(response['name'])
```
From the <code>payload</code> we would like to learn the mass of the payload and the orbit that it is going to.
```
# Takes the dataset and uses the payloads column to call the API and append the data to the lists
def getPayloadData(data):
for load in data['payloads']:
response = requests.get("https://api.spacexdata.com/v4/payloads/"+load).json()
PayloadMass.append(response['mass_kg'])
Orbit.append(response['orbit'])
```
From <code>cores</code> we would like to learn the outcome of the landing, the type of the landing, the number of flights with that core, whether gridfins were used, whether the core is reused, whether legs were used, the landing pad used, the block of the core (a number used to separate versions of cores), the number of times this specific core has been reused, and the serial of the core.
```
# Takes the dataset and uses the cores column to call the API and append the data to the lists
def getCoreData(data):
for core in data['cores']:
if core['core'] != None:
response = requests.get("https://api.spacexdata.com/v4/cores/"+core['core']).json()
Block.append(response['block'])
ReusedCount.append(response['reuse_count'])
Serial.append(response['serial'])
else:
Block.append(None)
ReusedCount.append(None)
Serial.append(None)
Outcome.append(str(core['landing_success'])+' '+str(core['landing_type']))
Flights.append(core['flight'])
GridFins.append(core['gridfins'])
Reused.append(core['reused'])
Legs.append(core['legs'])
LandingPad.append(core['landpad'])
```
Now let's start requesting rocket launch data from SpaceX API with the following URL:
```
spacex_url="https://api.spacexdata.com/v4/launches/past"
response = requests.get(spacex_url)
```
Check the content of the response
```
print(response.content)
```
You should see that the response contains a massive amount of information about SpaceX launches. Next, let's try to discover some more relevant information for this project.
### Task 1: Request and parse the SpaceX launch data using the GET request
To make the requested JSON results more consistent, we will use the following static response object for this project:
```
static_json_url='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/datasets/API_call_spacex_api.json'
```
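The lab presumably intends the GET request to be re-issued against this static snapshot so that everyone works with the same results; a one-line sketch of that step (an assumption, since the cell above only defines the URL):
```
# Hypothetical: fetch the static JSON snapshot instead of the live endpoint
response = requests.get(static_json_url)
```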
We should see that the request was successful, with the 200 status response code
```
response.status_code
```
Now we decode the response content as a Json using <code>.json()</code> and turn it into a Pandas dataframe using <code>.json_normalize()</code>
```
# Use json_normalize method to convert the json result into a dataframe
data = pd.json_normalize(response.json())
```
Using the dataframe <code>data</code> print the first 5 rows
```
# Get the head of the dataframe
data.head(5)
```
You will notice that a lot of the data are IDs. For example, the rocket column has no information about the rocket, just an identification number.
We will now use the API again to get information about the launches using the IDs given for each launch. Specifically we will be using columns <code>rocket</code>, <code>payloads</code>, <code>launchpad</code>, and <code>cores</code>.
```
# Lets take a subset of our dataframe keeping only the features we want and the flight number, and date_utc.
data = data[['rocket', 'payloads', 'launchpad', 'cores', 'flight_number', 'date_utc']]
# We will remove rows with multiple cores because those are falcon rockets with 2 extra rocket boosters and rows that have multiple payloads in a single rocket.
data = data[data['cores'].map(len)==1]
data = data[data['payloads'].map(len)==1]
# Since payloads and cores are lists of size 1 we will also extract the single value in the list and replace the feature.
data['cores'] = data['cores'].map(lambda x : x[0])
data['payloads'] = data['payloads'].map(lambda x : x[0])
# We also want to convert the date_utc to a datetime datatype and then extracting the date leaving the time
data['date'] = pd.to_datetime(data['date_utc']).dt.date
# Using the date we will restrict the dates of the launches
data = data[data['date'] <= datetime.date(2020, 11, 13)]
```
* From the <code>rocket</code> we would like to learn the booster name
* From the <code>payload</code> we would like to learn the mass of the payload and the orbit that it is going to
* From the <code>launchpad</code> we would like to know the name of the launch site being used, the longitude, and the latitude.
*   From <code>cores</code> we would like to learn the outcome of the landing, the type of the landing, the number of flights with that core, whether gridfins were used, whether the core is reused, whether legs were used, the landing pad used, the block of the core (a number used to separate versions of cores), the number of times this specific core has been reused, and the serial of the core.
The data from these requests will be stored in lists and will be used to create a new dataframe.
```
#Global variables
BoosterVersion = []
PayloadMass = []
Orbit = []
LaunchSite = []
Outcome = []
Flights = []
GridFins = []
Reused = []
Legs = []
LandingPad = []
Block = []
ReusedCount = []
Serial = []
Longitude = []
Latitude = []
```
These functions will apply the outputs globally to the above variables. Let's take a look at the <code>BoosterVersion</code> variable. Before we apply <code>getBoosterVersion</code> the list is empty:
```
BoosterVersion
```
Now, let's apply the <code>getBoosterVersion</code> function to get the booster version
```
# Call getBoosterVersion
getBoosterVersion(data)
```
The list has now been updated:
```
BoosterVersion[0:5]
```
We can apply the rest of the functions here:
```
# Call getLaunchSite
getLaunchSite(data)
# Call getPayloadData
getPayloadData(data)
# Call getCoreData
getCoreData(data)
```
Finally, let's construct our dataset using the data we have obtained. We combine the columns into a dictionary.
```
launch_dict = {'FlightNumber': list(data['flight_number']),
'Date': list(data['date']),
'BoosterVersion':BoosterVersion,
'PayloadMass':PayloadMass,
'Orbit':Orbit,
'LaunchSite':LaunchSite,
'Outcome':Outcome,
'Flights':Flights,
'GridFins':GridFins,
'Reused':Reused,
'Legs':Legs,
'LandingPad':LandingPad,
'Block':Block,
'ReusedCount':ReusedCount,
'Serial':Serial,
'Longitude': Longitude,
'Latitude': Latitude}
```
Then, we need to create a Pandas data frame from the dictionary launch_dict.
```
# Create a data from launch_dict
data1 = pd.DataFrame.from_dict(launch_dict)
```
Show the first few rows of the dataframe
```
# Show the head of the dataframe
data1.head()
```
### Task 2: Filter the dataframe to only include `Falcon 9` launches
Finally we will remove the Falcon 1 launches keeping only the Falcon 9 launches. Filter the data dataframe using the <code>BoosterVersion</code> column to only keep the Falcon 9 launches. Save the filtered data to a new dataframe called <code>data_falcon9</code>.
```
# Hint data['BoosterVersion']!='Falcon 1'
data_falcon9 = data1[data1['BoosterVersion']!='Falcon 1']
```
Now that we have removed some values, we should reset the FlightNumber column
```
data_falcon9.loc[:,'FlightNumber'] = list(range(1, data_falcon9.shape[0]+1))
data_falcon9
```
## Data Wrangling
We can see below that some of the rows are missing values in our dataset.
```
data_falcon9.isnull().sum()
```
Before we can continue we must deal with these missing values. The <code>LandingPad</code> column will retain None values to represent when landing pads were not used.
### Task 3: Dealing with Missing Values
Calculate below the mean for the <code>PayloadMass</code> using the <code>.mean()</code>. Then use the mean and the <code>.replace()</code> function to replace `np.nan` values in the data with the mean you calculated.
```
# Calculate the mean value of PayloadMass column
# Replace the np.nan values with its mean value
data_falcon9['PayloadMass'].replace(np.nan, data_falcon9['PayloadMass'].mean(), inplace=True)
```
You should see the number of missing values of the <code>PayLoadMass</code> change to zero.
Now we should have no missing values in our dataset except for in <code>LandingPad</code>.
```
#Confirming the above removal of nulls functioned as intended
data_falcon9.isnull().sum()
```
We can now export it to a <b>CSV</b> for the next section, but to make the answers consistent, in the next lab we will provide data in a pre-selected date range.
<code>data_falcon9.to_csv('dataset_part\_1.csv', index=False)</code>
## Authors
<a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ---------- | ----------------------------------- |
| 2020-09-20 | 1.1 | Joseph | get result each time you run |
| 2020-09-20 | 1.1 | Azim | Created Part 1 Lab using SpaceX API |
| 2020-09-20 | 1.0 | Joseph | Modified Multiple Areas |
Copyright © 2021 IBM Corporation. All rights reserved.
# Implementing the Gradient Descent Algorithm
In this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data.
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#Some helper functions for plotting and drawing lines
def plot_points(X, y):
admitted = X[np.argwhere(y==1)]
rejected = X[np.argwhere(y==0)]
plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')
plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')
def display(m, b, color='g--'):
plt.xlim(-0.05,1.05)
plt.ylim(-0.05,1.05)
x = np.arange(-10, 10, 0.1)
plt.plot(x, m*x+b, color)
```
## Reading and plotting the data
```
data = pd.read_csv('data.csv', header=None)
X = np.array(data[[0,1]])
y = np.array(data[2])
plot_points(X,y)
plt.show()
```
## TODO: Implementing the basic functions
Here is your turn to shine. Implement the following formulas, as explained in the text.
- Sigmoid activation function
$$\sigma(x) = \frac{1}{1+e^{-x}}$$
- Output (prediction) formula
$$\hat{y} = \sigma(w_1 x_1 + w_2 x_2 + b)$$
- Error function
$$Error(y, \hat{y}) = - y \log(\hat{y}) - (1-y) \log(1-\hat{y})$$
- The function that updates the weights
$$ w_i \longrightarrow w_i + \alpha (y - \hat{y}) x_i$$
$$ b \longrightarrow b + \alpha (y - \hat{y})$$
```
# Implement the following functions
# Activation (sigmoid) function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Output (prediction) formula
def output_formula(features, weights, bias):
return sigmoid(np.dot(features, weights) + bias)
# Error (log-loss) formula
def error_formula(y, output):
return -y * np.log(output) - (1 - y) * np.log(1 - output)
# Gradient descent step
def update_weights(x, y, weights, bias, learnrate):
output = output_formula(x, weights, bias)
d_error = y - output
weights += learnrate * d_error * x
bias += learnrate * d_error
return weights, bias
```
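As a quick sanity check (toy values chosen here for illustration, not part of the original lab), the pieces can be verified by hand before training:
```
# sigmoid(0) should be exactly 0.5
print(sigmoid(0))
# One update step on a toy point x=[1,1], y=1 with zero weights and bias:
# the prediction is 0.5, so each weight and the bias move by 0.01 * (1 - 0.5) = 0.005 at learnrate 0.01
w, b = update_weights(np.array([1.0, 1.0]), 1, np.zeros(2), 0.0, 0.01)
print(w, b)
```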
## Training function
This function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. It will also plot the data, and some of the boundary lines obtained as we run the algorithm.
```
np.random.seed(44)
epochs = 100
learnrate = 0.01
def train(features, targets, epochs, learnrate, graph_lines=False):
errors = []
n_records, n_features = features.shape
last_loss = None
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
bias = 0
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features, targets):
output = output_formula(x, weights, bias)
error = error_formula(y, output)
weights, bias = update_weights(x, y, weights, bias, learnrate)
# Printing out the log-loss error on the training set
out = output_formula(features, weights, bias)
loss = np.mean(error_formula(targets, out))
errors.append(loss)
if e % (epochs / 10) == 0:
print("\n========== Epoch", e,"==========")
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
predictions = out > 0.5
accuracy = np.mean(predictions == targets)
print("Accuracy: ", accuracy)
if graph_lines and e % (epochs / 100) == 0:
display(-weights[0]/weights[1], -bias/weights[1])
# Plotting the solution boundary
plt.title("Solution boundary")
display(-weights[0]/weights[1], -bias/weights[1], 'black')
# Plotting the data
plot_points(features, targets)
plt.show()
# Plotting the error
plt.title("Error Plot")
plt.xlabel('Number of epochs')
plt.ylabel('Error')
plt.plot(errors)
plt.show()
```
## Time to train the algorithm!
When we run the function, we'll obtain the following:
- 10 updates with the current training loss and accuracy
- A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs.
- A plot of the error function. Notice how it decreases as we go through more epochs.
```
train(X, y, epochs, learnrate, True)
```
<a href="https://colab.research.google.com/github/pranaytelukuntla/TSF-TASK-1/blob/main/TSF_TASK_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **PRANAY T**
## **GRADUATE ROTATIONAL INTERNSHIP PROGRAM-(THE SPARK FOUNDATION)**
### **TASK-1: PREDICTION USING SUPERVISED MACHINE LEARNING**
**IMPORTING REQUIRED LIBRARIES**
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
**STEP-1: READING THE STUDENT SCORES DATASET**
```
dataset = pd.read_csv("https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv")
print("Data imported successfully")
dataset
```
**THE FIRST FIVE ELEMENTS IN THE DATASET:**
```
dataset.head()
dataset.describe()
```
**SUMMARY OF THE DATASET:**
```
dataset.info()
```
**NUMBER OF ROWS AND COLUMNS**
```
dataset.shape
```
**STEP-2: DATA VISUALIZATION**
```
plt.scatter(dataset['Hours'], dataset['Scores'])
plt.title('Hours vs Percentage')
plt.xlabel('Studied Hours')
plt.ylabel('Scores')
plt.show()
```
**STEP-3: TRAINING OF MODEL**
```
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2, random_state=0)
```
**TRAINING THE SIMPLE LINEAR REGRESSION MODEL ON THE TRAINING SET**
```
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
```
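Before plotting, it can be helpful to inspect the fitted parameters (a small optional check using standard scikit-learn attributes):
```
# Slope and intercept of the fitted line
print('Coefficient:', regressor.coef_)
print('Intercept:', regressor.intercept_)
```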
**STEP-4: PLOTTING**
```
# Plotting the regression line
line = regressor.coef_*X+regressor.intercept_
# Plotting for the test data
plt.title("Training set");
plt.ylabel('Scores')
plt.xlabel('Hours')
plt.scatter(X, y,color="blue")
plt.plot(X, line,color="red");
plt.show()
```
**STEP-5: PREDICTING AND COMPARING**
```
print(X_test)
y_pred = regressor.predict(X_test)
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
print('Training score :', regressor.score(X_train, y_train))
print('Test score :', regressor.score(X_test, y_test))
df.plot(kind='bar',figsize=(5,5))
plt.grid(which='major', linewidth='0.5', color='red')
plt.grid(which='minor', linewidth='0.5', color='yellow')
plt.show()
Hours= 9.25
test=np.array([Hours])
test=test.reshape(-1,1)
own_pred = regressor.predict(test)
print("No of Hours = {}".format(Hours))
print("Predicted Score = {}".format(own_pred[0]))
```
**STEP-6: FINAL ANALYSIS**
```
from sklearn import metrics
print('Mean Absolute Error:',metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:',metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:',np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-2:',metrics.r2_score(y_test, y_pred))
```
# The Secret to Getting the Second Date
### *Do you believe in love at first sight, or should I walk by again? - Anonymous*
An analysis on the Speed Dating dataset from Kaggle: https://www.kaggle.com/annavictoria/speed-dating-experiment

## Introduction
What makes people attractive? What gets people a second date? Why isn't garlic breath a variable in this dataset?
**We're about to find out.** (Hang in there, single people!)
### About the data:
* Data was gathered from 552 participants in experimental speed dating events from 2002-2004.
* During the events, the attendees would have a four minute "first date" with every other participant of the opposite sex.
* At the end of their four minutes, participants were asked if they would like to see their date again. They were also asked to rate their date on six attributes:
* Attractiveness
* Sincerity
* Intelligence
* Fun
* Ambition
* Shared Interests.
* The dataset also includes questionnaire data gathered from participants at different points in the process. These fields include:
* demographics
* dating habits
* self-perception across key attributes
* beliefs on what others find valuable in a mate
* lifestyle information
See the speed-dating-data-key.doc for data dictionary and question key.
## Table of Contents
I. [Data Cleaning and EDA](#cln)
* [Heatmap](#hm)
* [Interest Overlap](#io)
* [Decisions by Gender](#dg)
* [Unrequited Love](#url)
* [OLS Model](#ols)
II. [Modeling](#mdl)
* [Logistic Regression](#log)
* [Random Forest](#rf)
* [XGBoost](#xgb)
III. [Conclusion](#ccn)
```
# importing packages
%matplotlib inline
import pandas as pd
pd.options.display.max_rows = 1000 #handy for showing truncated results
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
# importing data
dating = pd.read_csv("speed-dating-data.csv", encoding="ISO-8859-1") # use this encoding, funky error otherwise. thanks kagglers!
dating.head(5)
# counting null values
dating.isnull().sum()
```
## <a id="cln"> Data Cleaning and EDA</a>
From the peek at the data we had above, there are a ton of fields with NaNs. A lot of NaNs. There are 8,378 rows, and a number of fields have thousands of NaNs, so it would probably be bad practice to use imputation to guess their values. Instead, I'll disregard the fields with over 4,000 null values and narrow my analysis to the fields that I can use. First though, let's take a look at some totals before we throw out fields and rows due to missing values.
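(A more programmatic equivalent of that cutoff would be a threshold-based column drop, sketched below with an assumed limit of 4,000 nulls; the notebook instead selects the usable columns by position further down.)
```
# Hypothetical alternative: keep only columns with fewer than 4,000 missing values
date_alt = dating.loc[:, dating.isnull().sum() < 4000]
print(date_alt.shape)
```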
```
# age distribution of participants
age = dating[np.isfinite(dating['age'])]['age']
plt.hist(age.values)
plt.xlabel('Age')
plt.ylabel('Frequency')
```
So most of the participants were in their mid twenties to early thirties.
```
# out of curiosity, I want to see how many speed daters found a match!
pd.crosstab(index=dating['match'],columns="count")
```
It looks like only about **20% of people found themselves a second date** after the speed dating session!
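(For reference, the exact share can be computed directly from the same column; a one-line sketch:)
```
# Fraction of speed dates that ended in a mutual match
print(dating['match'].mean())
```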
```
# narrowing dataset
dating_1 = dating.iloc[:, 11:28]
dating_2 = dating.iloc[:, 30:35]
dating_3 = dating.iloc[:, 39:43]
dating_4 = dating.iloc[:, 45:67]
dating_5 = dating.iloc[:, 69:74]
dating_6 = dating.iloc[:, 87:91]
dating_7 = dating.iloc[:, 97:102]
dating_8 = dating.iloc[:, 104:107]
date = pd.concat([dating.iloc[:, 0],dating.iloc[:, 2],dating_1,dating_2,dating_3,dating_4,dating_5,
dating_6,dating_7,dating_8], axis=1)
# counting null values
date.isnull().sum()
# removing null rows now that the nulls are in the hundreds and not the thousands
date2 = date.dropna()
# checking datatypes
date2.dtypes
# creating an object-free dataframe for later use
date3 = date2.drop(['field', 'from', 'career'], axis=1)
# heatmap
plt.subplots(figsize=(20,15))
ax = plt.axes()
ax.set_title("Correlation Heatmap")
corr = date3.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
```
In looking at the <a id="hm">heatmap</a> above, there are some interesting observations.
For example, men (gender = 1) seem to have a preference for attractive partners (attr1_1) while women (gender = 0) seem to have a preference for ambitious partners (amb1_1)!
In other news, women like shopping and yoga and men like gaming, so at least we know this heatmap is working. Let's look into the <a id="io">interests</a> some more!
```
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int = pd.concat([date3.iloc[:, 1],date3.iloc[:, 30:32]],axis = 1)
g = sns.pairplot(date_int, hue="gender")
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int = pd.concat([date3.iloc[:, 1],date3.iloc[:, 33:36]],axis = 1)
g = sns.pairplot(date_int, hue="gender")
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int2 = pd.concat([date3.iloc[:, 1],date3.iloc[:, 41:44]],axis = 1)
g = sns.pairplot(date_int2, hue="gender")
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int3 = pd.concat([date3.iloc[:, 1],date3.iloc[:, 45:47]],axis = 1)
g = sns.pairplot(date_int3, hue="gender")
```
Just for fun, I wanted to see how interests differed by gender. The histograms above show that **if you want to suggest a date that both genders are likely to enjoy, choose movies or dining!**
However, referring back to the heatmap none of the interests really correlated with match, so that gives me the go-ahead to remove interests from my match prediction model!
It also looks like match (the variable we are interested in) is mostly correlated to the decisions of the partners, and the partner's attributes (how well they rated the partner's attractiveness, sincerity, intelligence, fun, ambition, and how much they liked them).
Interestingly, match was not correlated to age or race, or even how similar the participant and partner's interests are (int_corr)! Love does not discriminate according to the data!
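One way to back up that reading of the heatmap is to pull out just the `match` column of the correlation matrix and sort it. This is an optional supplement that reuses the `corr` matrix computed above and assumes `match` survived the column slicing, as the discussion implies.
```
# Optional: rank variables by their correlation with `match`.
match_corr = corr['match'].drop('match').sort_values(ascending=False)
print(match_corr.head(10))   # most positively correlated with match
print(match_corr.tail(10))   # most negatively correlated with match
```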
```
# removing interests
date4 = date3.drop(['sports', 'tvsports', 'exercise', 'dining', 'museums', 'art', 'hiking',
'gaming', 'clubbing', 'reading', 'tv', 'theater', 'movies', 'concerts', 'music',
'shopping', 'yoga'], axis=1)
```
Continuing to investigate <a id="gd">gender differences</a>, I wonder... how many of each gender are there, and does that affect the other person's decision? That is, do women receive more positive final decisions from the other person (dec_o) than men do?
```
# looking at dec_o by gender
g = sns.FacetGrid(date4, col="gender")
g = g.map(plt.hist, "dec_o")
plt.ticklabel_format(useOffset=False, style='plain')
# chi-square test
gender_crosstab = pd.crosstab(index=date4.gender, columns=date4.dec_o)
gender_table = sm.stats.Table(gender_crosstab)
gender_rslt = gender_table.test_nominal_association()
gender_rslt.pvalue
```
It looks like women received about 1750 'no' and about 1600 'yes' for the decision question "Would you like to see him or her again?". Men received about 2050 'no' and about 1300 'yes'. In other words, **men are more likely to be rejected by women than women are to be rejected by men** (wahwah). This is a statistically significant difference as confirmed by the above chi-squared test p-value. Poor guys!
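To see those counts as within-gender proportions rather than raw totals, the same crosstab can be normalized by row; this is purely a supplementary view of the data already used in the chi-square test above.
```
# Supplementary view: proportion of 'no' (0) and 'yes' (1) decisions received, per gender.
pd.crosstab(index=date4.gender, columns=date4.dec_o, normalize='index')
```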
Now, I wonder, how many interactions were <a id="url">unrequited love</a>? That is, what is the count of rows where dec_o = 1 AND dec = 0, or dec = 1 AND dec_o = 0?
```
# unrequited love count
# parentheses keep both len() terms in one expression (the original dangling "+ len(...)" line was a no-op)
no_love_count = (len(date4[(date4['dec_o']==0) & (date4['dec']==1)])
                 + len(date4[(date4['dec_o']==1) & (date4['dec']==0)]))
perc_broken_heart = no_love_count / len(date4.index)
perc_broken_heart*100
```
So it seems **26% of participants unfortunately had their heart broken**. More than the percentage of people who got a second date!
On an unrelated note, I wonder if the incidence of unrequited love differs by the attractiveness of the partner.
```
# encoding unrequited love as a new column
date4['url']=np.where(((date4['dec_o']==0) & (date4['dec']==1))|((date4['dec']==0) & (date4['dec_o']==1)),1,0)
# looking at url by attractiveness
plt.figure(figsize=(7,9))
sns.boxplot(x='url', y='attr', data=date4, palette='cool')
plt.title('Broken Hearts by Attractiveness of Partner', fontsize=20)
plt.xlabel('Broken Heart', fontsize=16)
# chi-square test
bh_crosstab = pd.crosstab(index=date4.attr, columns=date4.url)
bh_table = sm.stats.Table(bh_crosstab)
bh_rslt = bh_table.test_nominal_association()
bh_rslt.pvalue
```
Looks like the difference in attractiveness was not statistically significant. So the good news is, the likelihood of getting rejected is not dependent on your attractiveness!
```
date5 = pd.concat([date4['attr3_1'],date4['sinc3_1'],date4['intel3_1'],date4['fun3_1'],date4['attr_o'],
date4['sinc_o'],date4['intel_o'],date4['fun_o'],date4['like'],date4['like_o'],
date4['int_corr'],date4['url']],axis=1)
plt.subplots(figsize=(15,10))
ax = plt.axes()
ax.set_title("Correlation Heatmap")
corr = date5.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
```
It looks like regardless of your attractiveness, or any other trait for that matter, you are just as likely to experience heartbreak!
It also looks like typically, your own opinion of how attractive you are (attr3_1) is only weakly correlated with how attractive your date finds you (attr_o)! And in fact, there is nearly no correlation between how smart or sincere you think you are versus how smart and sincere your date thinks of you! Perhaps these are tough qualities to get across in a 4 minute date!
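To put numbers on that "weakly correlated" claim, the specific self-rating/partner-rating pairs can be pulled out of `date5` directly; a small supplementary check using the columns selected above.
```
# Supplementary check: self-ratings (*3_1) vs. the partner's ratings (*_o).
pairs = [('attr3_1', 'attr_o'), ('sinc3_1', 'sinc_o'),
         ('intel3_1', 'intel_o'), ('fun3_1', 'fun_o')]
for self_col, other_col in pairs:
    r = date5[self_col].corr(date5[other_col])
    print("{} vs {}: r = {:.3f}".format(self_col, other_col, r))
```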
So that brings up the question, between attractiveness, sincerity, intelligence, fun, ambition, and liking, what was the most influential in the final decision (dec)? I'll run a <a id="ols">linear regression model</a>.
```
# OLS model with coefficients
X_ols = date4[['attr','sinc','intel','fun','like','int_corr']]
y_ols = date4.dec
traits = sm.OLS(y_ols, X_ols)
results_traits = traits.fit()
results_traits.summary()
```
It turns out that being intelligent or sincere or having similar interests actually slightly hurts your chances at securing that second date! Don't panic though, this is just from 4 minutes of meeting each other! We might take this as advice to focus on breaking the ice and being more fun and likeable in the first date!
Now let's run an OLS but with both respondents' ratings instead of just one, and this time on match instead of decision (dec).
```
# OLS model with coefficients
X_ols = date4[['dec','dec_o','attr','attr_o','fun','fun_o','like','like_o','int_corr']]
y_ols = date4.match
traits = sm.OLS(y_ols, X_ols)
results_traits = traits.fit()
results_traits.summary()
```
**From the coefficients, it looks like all that really matters is the decision of both participants, and perhaps whether or not they liked one another.**
## <a id="mdl"> Modeling - Classification </a>
```
# preparing the data
X=date4[['like','dec']]
y=date4['match']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
# logistic regression classification model
model = LogisticRegression(C=1, random_state=0)
lrc = model.fit(X_train, y_train)
predict_train_lrc = lrc.predict(X_train)
predict_test_lrc = lrc.predict(X_test)
print('Training Accuracy:', metrics.accuracy_score(y_train, predict_train_lrc))
print('Validation Accuracy:', metrics.accuracy_score(y_test, predict_test_lrc))
```
Without knowing what the partner's decision is (dec_o), it turns out that given how much the respondent likes the partner and what the respondent's decision is, we have about an 82.5% accuracy in predicting a match on both the training and the validation using logistic regression. This makes sense given that we know only 26% of people were heartbroken -- if you like someone, odds are they will like you back!
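Because the outcome is imbalanced, that accuracy is best read against the no-information baseline of always predicting "no match"; this comparison is an addition to the original analysis, not part of it.
```
# Addition: majority-class baseline to put the ~82.5% accuracy in context.
print(y_test.value_counts(normalize=True))
baseline_acc = (y_test == 0).mean()
print("Always-predict-no-match accuracy: {:.3f}".format(baseline_acc))
```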
Let's try some other models to see if we can get closer to predicting a match.
```
# random forest model
model = RandomForestClassifier()
rf_model = model.fit(X_train, y_train)
predict_train_rf = rf_model.predict(X_train)
predict_test_rf = rf_model.predict(X_test)
print('Training Accuracy:', metrics.accuracy_score(y_train, predict_train_rf))
print('Validation Accuracy:', metrics.accuracy_score(y_test, predict_test_rf))
```
<a id="rf">Random forest</a> gave us a slightly more accurate model at 82.9% accuracy in train and 82.8% in test.
```
# xgboost model
model = GradientBoostingClassifier()
xgb_model = model.fit(X_train, y_train)
predict_train_xgb = xgb_model.predict(X_train)
predict_test_xgb = xgb_model.predict(X_test)
print('Training Accuracy:', metrics.accuracy_score(y_train, predict_train_xgb))
print('Validation Accuracy:', metrics.accuracy_score(y_test, predict_test_xgb))
```
<a id="xgb">XGBoost</a> was ever so slightly less accurate than Random Forest in the validation set. Looks like Random Forest is my champion model.
## <a id="ccn"> Conclusion </a>
Although this was slightly disappointing, it looks like there are still no real answers to the secret of love. It's not interests or hobbies, and it's not attractiveness or intelligence or other traits. Really, it's just how much someone likes you. Who would have thought?
On the other hand, we did learn a thing or two. To summarize:
* Take your date out to the cliche dinner and a movie because both genders are likely to enjoy it
* Dating is tougher for men (perhaps women can be more selective?)
* More participants experienced unrequited love than those that found love -- so don't feel too bad
* People get their hearts broken regardless of personal traits like attractiveness, so if you strike out, it's not you that is the problem
* Your impression of yourself is often wrong
* There is no one trait that makes you likeable
* Being likeable is important for securing that second date!
|
github_jupyter
|
# importing packages
%matplotlib inline
import pandas as pd
pd.options.display.max_rows = 1000 #handy for showing truncated results
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
# importing data
dating = pd.read_csv("speed-dating-data.csv", encoding="ISO-8859-1") # use this encoding, funky error otherwise. thanks kagglers!
dating.head(5)
# counting null values
dating.isnull().sum()
# age distribution of participants
age = dating[np.isfinite(dating['age'])]['age']
plt.hist(age.values)
plt.xlabel('Age')
plt.ylabel('Frequency')
# out of curiosity, I want to see how many speed daters found a match!
pd.crosstab(index=dating['match'],columns="count")
# narrowing dataset
dating_1 = dating.iloc[:, 11:28]
dating_2 = dating.iloc[:, 30:35]
dating_3 = dating.iloc[:, 39:43]
dating_4 = dating.iloc[:, 45:67]
dating_5 = dating.iloc[:, 69:74]
dating_6 = dating.iloc[:, 87:91]
dating_7 = dating.iloc[:, 97:102]
dating_8 = dating.iloc[:, 104:107]
date = pd.concat([dating.iloc[:, 0],dating.iloc[:, 2],dating_1,dating_2,dating_3,dating_4,dating_5,
dating_6,dating_7,dating_8], axis=1)
# counting null values
date.isnull().sum()
# removing null rows now that the nulls are in the hundreds and not the thousands
date2 = date.dropna()
# checking datatypes
date2.dtypes
# creating an object-free dataframe for later use
date3 = date2.drop(['field', 'from', 'career'], axis=1)
# heatmap
plt.subplots(figsize=(20,15))
ax = plt.axes()
ax.set_title("Correlation Heatmap")
corr = date3.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int = pd.concat([date3.iloc[:, 1],date3.iloc[:, 30:32]],axis = 1)
g = sns.pairplot(date_int, hue="gender")
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int = pd.concat([date3.iloc[:, 1],date3.iloc[:, 33:36]],axis = 1)
g = sns.pairplot(date_int, hue="gender")
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int2 = pd.concat([date3.iloc[:, 1],date3.iloc[:, 41:44]],axis = 1)
g = sns.pairplot(date_int2, hue="gender")
# just for fun, looking at interest overlap
sns.set(style="ticks", color_codes=True)
date_int3 = pd.concat([date3.iloc[:, 1],date3.iloc[:, 45:47]],axis = 1)
g = sns.pairplot(date_int3, hue="gender")
# removing interests
date4 = date3.drop(['sports', 'tvsports', 'exercise', 'dining', 'museums', 'art', 'hiking',
'gaming', 'clubbing', 'reading', 'tv', 'theater', 'movies', 'concerts', 'music',
'shopping', 'yoga'], axis=1)
# looking at dec_o by gender
g = sns.FacetGrid(date4, col="gender")
g = g.map(plt.hist, "dec_o")
plt.ticklabel_format(useOffset=False, style='plain')
# chi-square test
gender_crosstab = pd.crosstab(index=date4.gender, columns=date4.dec_o)
gender_table = sm.stats.Table(gender_crosstab)
gender_rslt = gender_table.test_nominal_association()
gender_rslt.pvalue
# unrequited love count
# parentheses keep both len() terms in one expression (the original dangling "+ len(...)" line was a no-op)
no_love_count = (len(date4[(date4['dec_o']==0) & (date4['dec']==1)])
                 + len(date4[(date4['dec_o']==1) & (date4['dec']==0)]))
perc_broken_heart = no_love_count / len(date4.index)
perc_broken_heart*100
# encoding unrequited love as a new column
date4['url']=np.where(((date4['dec_o']==0) & (date4['dec']==1))|((date4['dec']==0) & (date4['dec_o']==1)),1,0)
# looking at url by attractiveness
plt.figure(figsize=(7,9))
sns.boxplot(x='url', y='attr', data=date4, palette='cool')
plt.title('Broken Hearts by Attractiveness of Partner', fontsize=20)
plt.xlabel('Broken Heart', fontsize=16)
# chi-square test
bh_crosstab = pd.crosstab(index=date4.attr, columns=date4.url)
bh_table = sm.stats.Table(bh_crosstab)
bh_rslt = bh_table.test_nominal_association()
bh_rslt.pvalue
date5 = pd.concat([date4['attr3_1'],date4['sinc3_1'],date4['intel3_1'],date4['fun3_1'],date4['attr_o'],
date4['sinc_o'],date4['intel_o'],date4['fun_o'],date4['like'],date4['like_o'],
date4['int_corr'],date4['url']],axis=1)
plt.subplots(figsize=(15,10))
ax = plt.axes()
ax.set_title("Correlation Heatmap")
corr = date5.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# OLS model with coefficients
X_ols = date4[['attr','sinc','intel','fun','like','int_corr']]
y_ols = date4.dec
traits = sm.OLS(y_ols, X_ols)
results_traits = traits.fit()
results_traits.summary()
# OLS model with coefficients
X_ols = date4[['dec','dec_o','attr','attr_o','fun','fun_o','like','like_o','int_corr']]
y_ols = date4.match
traits = sm.OLS(y_ols, X_ols)
results_traits = traits.fit()
results_traits.summary()
# preparing the data
X=date4[['like','dec']]
y=date4['match']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
# logistic regression classification model
model = LogisticRegression(C=1, random_state=0)
lrc = model.fit(X_train, y_train)
predict_train_lrc = lrc.predict(X_train)
predict_test_lrc = lrc.predict(X_test)
print('Training Accuracy:', metrics.accuracy_score(y_train, predict_train_lrc))
print('Validation Accuracy:', metrics.accuracy_score(y_test, predict_test_lrc))
# random forest model
model = RandomForestClassifier()
rf_model = model.fit(X_train, y_train)
predict_train_rf = rf_model.predict(X_train)
predict_test_rf = rf_model.predict(X_test)
print('Training Accuracy:', metrics.accuracy_score(y_train, predict_train_rf))
print('Validation Accuracy:', metrics.accuracy_score(y_test, predict_test_rf))
# xgboost model
model = GradientBoostingClassifier()
xgb_model = model.fit(X_train, y_train)
predict_train_xgb = xgb_model.predict(X_train)
predict_test_xgb = xgb_model.predict(X_test)
print('Training Accuracy:', metrics.accuracy_score(y_train, predict_train_xgb))
print('Validation Accuracy:', metrics.accuracy_score(y_test, predict_test_xgb))
| 0.373762 | 0.861247 |
# **Tame Your Python**
Let's see how we can cluster some synthetic datasets with DBSCAN and evaluate the resulting clusters
`Leggo`
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
def plot_dataset(X):
plt.scatter(X[:,0], X[:,1])
plt.show()
def evaluation_labels(y, y_pred):
from sklearn import metrics
adj_rand = metrics.adjusted_rand_score(y, y_pred)
print("Adjusted Rand Score = " + str(adj_rand))
adj_mi = metrics.adjusted_mutual_info_score(y, y_pred)
print("Adjusted Mutual Information = " + str(adj_mi))
h = metrics.homogeneity_score(y, y_pred)
print("Homogeneity = " + str(h))
v = metrics.v_measure_score(y, y_pred)
print("V-measure = " + str(v))
c = metrics.completeness_score(y, y_pred)
print("Completeness = " + str(c))
f = metrics.fowlkes_mallows_score(y, y_pred)
print("Fowlkes-Mallows = " + str(f))
return
def evaluation(X, y_pred):
from sklearn import metrics
try:
s = metrics.silhouette_score(X, y_pred, metric='euclidean')
print("Silhouette Coefficient = " + str(s))
c = metrics.calinski_harabasz_score(X, y_pred)
print("Calinski-Harabasz = " + str(c))
d = metrics.davies_bouldin_score(X, y_pred)
print("Davies-Bouldin = " + str(d))
except:
print("model did not converge")
return
def visual(c, X,y):
from sklearn.cluster import DBSCAN
cluster_object = DBSCAN(eps=0.3)
y_pred = cluster_object.fit_predict(X)
colors = ['red', 'green', 'blue', 'cyan', 'black', 'yellow', 'magenta', 'brown', 'orange', 'silver', 'goldenrod', 'olive', 'dodgerblue']
clusters = np.unique(y_pred)
print("Cluster Labels")
print(clusters)
print("Evaluation")
evaluation_labels(y, y_pred)
evaluation(X, y_pred)
for cluster in np.unique(y):
row_idx = np.where(y == cluster)
plt.scatter(X[row_idx, 0], X[row_idx, 1])
plt.title('Dataset')
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.show()
for cluster in clusters:
row_idx = np.where(y_pred == cluster)
plt.scatter(X[row_idx, 0], X[row_idx, 1])
plt.title('Clusters')
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.show()
```
## Get the dataset
```
n = 1000
from sklearn.datasets import make_moons, make_blobs, make_circles, make_s_curve
X_moons, y_moons = make_moons(n_samples = n, noise=0.1)
X_blobs, y_blobs = make_blobs(n_samples = n, n_features = 2)
X_circles, y_circles = make_circles(n_samples=n, noise=0.1, factor = 0.4)
X_scurve, y_scurve = make_s_curve(n_samples=n, noise = 0.1)
X_random = np.random.random([n, 2])
y_random = np.random.randint(0,3,size = [n])
transformation = [[0.80834549, -0.83667341], [-0.20887718, 0.85253229]]
X_aniso = np.dot(X_blobs, transformation)
y_aniso = y_blobs
plot_dataset(X_moons)
visual(2, X_moons, y_moons)
plot_dataset(X_blobs)
visual(3, X_blobs,y_blobs)
plot_dataset(X_circles)
visual(2, X_circles,y_circles)
plot_dataset(X_random)
visual(3, X_random,y_random)
plot_dataset(X_aniso)
visual(3, X_aniso,y_aniso)
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
def plot_dataset(X):
plt.scatter(X[:,0], X[:,1])
plt.show()
def evaluation_labels(y, y_pred):
from sklearn import metrics
adj_rand = metrics.adjusted_rand_score(y, y_pred)
print("Adjusted Rand Score = " + str(adj_rand))
adj_mi = metrics.adjusted_mutual_info_score(y, y_pred)
print("Adjusted Mutual Information = " + str(adj_mi))
h = metrics.homogeneity_score(y, y_pred)
print("Homogeneity = " + str(h))
v = metrics.v_measure_score(y, y_pred)
print("V-measure = " + str(v))
c = metrics.completeness_score(y, y_pred)
print("Completeness = " + str(c))
f = metrics.fowlkes_mallows_score(y, y_pred)
print("Fowlkes-Mallows = " + str(f))
return
def evaluation(X, y_pred):
from sklearn import metrics
try:
s = metrics.silhouette_score(X, y_pred, metric='euclidean')
print("Silhouette Coefficient = " + str(s))
c = metrics.calinski_harabasz_score(X, y_pred)
print("Calinski-Harabasz = " + str(c))
d = metrics.davies_bouldin_score(X, y_pred)
print("Davies-Bouldin = " + str(d))
except:
print("model did not converge")
return
def visual(c, X,y):
from sklearn.cluster import DBSCAN
cluster_object = DBSCAN(eps=0.3)
y_pred = cluster_object.fit_predict(X)
colors = ['red', 'green', 'blue', 'cyan', 'black', 'yellow', 'magenta', 'brown', 'orange', 'silver', 'goldenrod', 'olive', 'dodgerblue']
clusters = np.unique(y_pred)
print("Cluster Labels")
print(clusters)
print("Evaluation")
evaluation_labels(y, y_pred)
evaluation(X, y_pred)
for cluster in np.unique(y):
row_idx = np.where(y == cluster)
plt.scatter(X[row_idx, 0], X[row_idx, 1])
plt.title('Dataset')
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.show()
for cluster in clusters:
row_idx = np.where(y_pred == cluster)
plt.scatter(X[row_idx, 0], X[row_idx, 1])
plt.title('Clusters')
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.show()
n = 1000
from sklearn.datasets import make_moons, make_blobs, make_circles, make_s_curve
X_moons, y_moons = make_moons(n_samples = n, noise=0.1)
X_blobs, y_blobs = make_blobs(n_samples = n, n_features = 2)
X_circles, y_circles = make_circles(n_samples=n, noise=0.1, factor = 0.4)
X_scurve, y_scurve = make_s_curve(n_samples=n, noise = 0.1)
X_random = np.random.random([n, 2])
y_random = np.random.randint(0,3,size = [n])
transformation = [[0.80834549, -0.83667341], [-0.20887718, 0.85253229]]
X_aniso = np.dot(X_blobs, transformation)
y_aniso = y_blobs
plot_dataset(X_moons)
visual(2, X_moons, y_moons)
plot_dataset(X_blobs)
visual(3, X_blobs,y_blobs)
plot_dataset(X_circles)
visual(2, X_circles,y_circles)
plot_dataset(X_random)
visual(3, X_random,y_random)
plot_dataset(X_aniso)
visual(3, X_aniso,y_aniso)
| 0.651687 | 0.880181 |
# Building a simple agent with TextWorld
This tutorial outlines the steps to build an agent that learns how to play __choice-based__ text-based games generated with TextWorld.
### Prerequisite
To run this notebook you need [PyTorch](https://pytorch.org/) (tested with v0.4.1).
## Learning challenges
Training an agent such that it can learn how to play text-based games is not trivial. Among other challenges, we have to deal with
1. a combinatorial action space (that grows w.r.t. vocabulary)
2. a really sparse reward signal.
To ease the learning process, we will be requesting additional information alongside the game's narrative (as covered in [Playing TextWorld generated games with OpenAI Gym](Playing%20TextWorld%20generated%20games%20with%20OpenAI%20Gym.ipynb#Interact-with-the-game)). More specifically, we will request the following information (a minimal request sketch follows the list below):
- __Description__:
For every game state, we will get the output of the `look` command which describes the current location;
- __Inventory__:
For every game state, we will get the output of the `inventory` command which describes the player's inventory;
- __Admissible commands__:
For every game state, we will get the list of commands guaranteed to be understood by the game interpreter;
- __Intermediate reward__:
For every game state, we will get an intermediate reward which can either be:
- __-1__: last action needs to be undone before resuming the quest
- __0__: last action didn't affect the quest
- __1__: last action brought us closer to completing the quest
- __Entities__:
For every game, we will get a list of entity names that the agent can interact with.
## Simple test games
We can use TextWorld to generate a few simple games with the following handcrafted world
```
Bathroom
+
|
+
Bedroom +-(d1)-+ Kitchen +--(d2)--+ Backyard
(P) + +
| |
+ +
Living Room Garden
```
where the goal is always to retrieve a hidden food item and put it on the stove, which is located in the kitchen. One can lose the game by eating the food item instead of putting it on the stove!
Using `tw-make tw-simple ...` (see `make_games.sh` for the exact commands), we generated the following 7 games:
| gamefile | description |
| -------- | ----------- |
| `games/rewardsDense_goalDetailed.ulx` | dense reward + detailed instructions |
| `games/rewardsBalanced_goalDetailed.ulx` | balanced rewards + detailed instructions |
| `games/rewardsSparse_goalDetailed.ulx` | sparse rewards + detailed instructions |
| `games/rewardsDense_goalBrief.ulx` | dense rewards + no instructions but the goal is mentioned |
| `games/rewardsBalanced_goalBrief.ulx` | balanced rewards + no instructions but the goal is mentioned |
| `games/rewardsSparse_goalBrief.ulx` | sparse rewards + no instructions but the goal is mentioned |
| `games/rewardsSparse_goalNone.ulx` | sparse rewards + no instructions/goal<br>_Hint: there's a hidden note in the game that describes the goal!_ |
## Building the random baseline
Let's start with building an agent that simply selects an admissible command at random.
```
from typing import Mapping, Any
import numpy as np
import textworld.gym
class RandomAgent(textworld.gym.Agent):
""" Agent that randomly selects a command from the admissible ones. """
def __init__(self, seed=1234):
self.seed = seed
self.rng = np.random.RandomState(self.seed)
@property
def infos_to_request(self) -> textworld.EnvInfos:
return textworld.EnvInfos(admissible_commands=True)
def act(self, obs: str, score: int, done: bool, infos: Mapping[str, Any]) -> str:
return self.rng.choice(infos["admissible_commands"])
```
## Play function
Let's write a simple function to play a text-based game using an agent.
```
import os
from glob import glob
import gym
import textworld.gym
def play(agent, path, max_step=100, nb_episodes=10, verbose=True):
infos_to_request = agent.infos_to_request
infos_to_request.max_score = True # Needed to normalize the scores.
gamefiles = [path]
if os.path.isdir(path):
gamefiles = glob(os.path.join(path, "*.ulx"))
env_id = textworld.gym.register_games(gamefiles,
request_infos=infos_to_request,
max_episode_steps=max_step)
env = gym.make(env_id) # Create a Gym environment to play the text game.
if verbose:
if os.path.isdir(path):
print(os.path.dirname(path), end="")
else:
print(os.path.basename(path), end="")
# Collect some statistics: nb_steps, final reward.
avg_moves, avg_scores, avg_norm_scores = [], [], []
for no_episode in range(nb_episodes):
obs, infos = env.reset() # Start new episode.
score = 0
done = False
nb_moves = 0
while not done:
command = agent.act(obs, score, done, infos)
obs, score, done, infos = env.step(command)
nb_moves += 1
agent.act(obs, score, done, infos) # Let the agent know the game is done.
if verbose:
print(".", end="")
avg_moves.append(nb_moves)
avg_scores.append(score)
avg_norm_scores.append(score / infos["max_score"])
env.close()
msg = " \tavg. steps: {:5.1f}; avg. score: {:4.1f} / {}."
if verbose:
if os.path.isdir(path):
print(msg.format(np.mean(avg_moves), np.mean(avg_norm_scores), 1))
else:
print(msg.format(np.mean(avg_moves), np.mean(avg_scores), infos["max_score"]))
```
#### Testing the random agent
```
# We report the score and steps averaged over 10 playthroughs.
play(RandomAgent(), "./games/rewardsDense_goalDetailed.ulx") # Dense rewards
play(RandomAgent(), "./games/rewardsBalanced_goalDetailed.ulx") # Balanced rewards
play(RandomAgent(), "./games/rewardsSparse_goalDetailed.ulx") # Sparse rewards
```
## Neural agent
Now, let's create an agent that can learn to play text-based games. The agent will be trained to select a command from the list of admissible commands given the current game's narrative, inventory, and room description.
Here's the implementation of that learning agent that uses [PyTorch](https://pytorch.org/).
```
import re
from typing import List, Mapping, Any, Optional
from collections import defaultdict
import numpy as np
import textworld
import textworld.gym
from textworld import EnvInfos
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CommandScorer(nn.Module):
def __init__(self, input_size, hidden_size):
super(CommandScorer, self).__init__()
torch.manual_seed(42) # For reproducibility
self.embedding = nn.Embedding(input_size, hidden_size)
self.encoder_gru = nn.GRU(hidden_size, hidden_size)
self.cmd_encoder_gru = nn.GRU(hidden_size, hidden_size)
self.state_gru = nn.GRU(hidden_size, hidden_size)
self.hidden_size = hidden_size
self.state_hidden = torch.zeros(1, 1, hidden_size, device=device)
self.critic = nn.Linear(hidden_size, 1)
self.att_cmd = nn.Linear(hidden_size * 2, 1)
def forward(self, obs, commands, **kwargs):
input_length = obs.size(0)
batch_size = obs.size(1)
nb_cmds = commands.size(1)
embedded = self.embedding(obs)
encoder_output, encoder_hidden = self.encoder_gru(embedded)
state_output, state_hidden = self.state_gru(encoder_hidden, self.state_hidden)
self.state_hidden = state_hidden
value = self.critic(state_output)
# Attention network over the commands.
cmds_embedding = self.embedding.forward(commands)
_, cmds_encoding_last_states = self.cmd_encoder_gru.forward(cmds_embedding) # 1 x cmds x hidden
# Same observed state for all commands.
cmd_selector_input = torch.stack([state_hidden] * nb_cmds, 2) # 1 x batch x cmds x hidden
# Same command choices for the whole batch.
cmds_encoding_last_states = torch.stack([cmds_encoding_last_states] * batch_size, 1) # 1 x batch x cmds x hidden
# Concatenate the observed state and command encodings.
cmd_selector_input = torch.cat([cmd_selector_input, cmds_encoding_last_states], dim=-1)
# Compute one score per command.
scores = F.relu(self.att_cmd(cmd_selector_input)).squeeze(-1) # 1 x Batch x cmds
probs = F.softmax(scores, dim=2) # 1 x Batch x cmds
index = probs[0].multinomial(num_samples=1).unsqueeze(0) # 1 x batch x indx
return scores, index, value
def reset_hidden(self, batch_size):
self.state_hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)
class NeuralAgent:
""" Simple Neural Agent for playing TextWorld games. """
MAX_VOCAB_SIZE = 1000
UPDATE_FREQUENCY = 10
LOG_FREQUENCY = 1000
GAMMA = 0.9
def __init__(self) -> None:
self._initialized = False
self._epsiode_has_started = False
self.id2word = ["<PAD>", "<UNK>"]
self.word2id = {w: i for i, w in enumerate(self.id2word)}
self.model = CommandScorer(input_size=self.MAX_VOCAB_SIZE, hidden_size=128)
self.optimizer = optim.Adam(self.model.parameters(), 0.00003)
self.mode = "test"
def train(self):
self.mode = "train"
self.stats = {"max": defaultdict(list), "mean": defaultdict(list)}
self.transitions = []
self.model.reset_hidden(1)
self.last_score = 0
self.no_train_step = 0
def test(self):
self.mode = "test"
self.model.reset_hidden(1)
@property
def infos_to_request(self) -> EnvInfos:
return EnvInfos(description=True, inventory=True, admissible_commands=True,
won=True, lost=True)
def _get_word_id(self, word):
if word not in self.word2id:
if len(self.word2id) >= self.MAX_VOCAB_SIZE:
return self.word2id["<UNK>"]
self.id2word.append(word)
self.word2id[word] = len(self.word2id)
return self.word2id[word]
def _tokenize(self, text):
# Simple tokenizer: strip out all non-alphabetic characters.
text = re.sub("[^a-zA-Z0-9\- ]", " ", text)
word_ids = list(map(self._get_word_id, text.split()))
return word_ids
def _process(self, texts):
texts = list(map(self._tokenize, texts))
max_len = max(len(l) for l in texts)
padded = np.ones((len(texts), max_len)) * self.word2id["<PAD>"]
for i, text in enumerate(texts):
padded[i, :len(text)] = text
padded_tensor = torch.from_numpy(padded).type(torch.long).to(device)
padded_tensor = padded_tensor.permute(1, 0) # Batch x Seq => Seq x Batch
return padded_tensor
def _discount_rewards(self, last_values):
returns, advantages = [], []
R = last_values.data
for t in reversed(range(len(self.transitions))):
rewards, _, _, values = self.transitions[t]
R = rewards + self.GAMMA * R
adv = R - values
returns.append(R)
advantages.append(adv)
return returns[::-1], advantages[::-1]
def act(self, obs: str, score: int, done: bool, infos: Mapping[str, Any]) -> Optional[str]:
# Build agent's observation: feedback + look + inventory.
input_ = "{}\n{}\n{}".format(obs, infos["description"], infos["inventory"])
# Tokenize and pad the input and the commands to chose from.
input_tensor = self._process([input_])
commands_tensor = self._process(infos["admissible_commands"])
# Get our next action and value prediction.
outputs, indexes, values = self.model(input_tensor, commands_tensor)
action = infos["admissible_commands"][indexes[0]]
if self.mode == "test":
if done:
self.model.reset_hidden(1)
return action
self.no_train_step += 1
if self.transitions:
reward = score - self.last_score # Reward is the gain/loss in score.
self.last_score = score
if infos["won"]:
reward += 100
if infos["lost"]:
reward -= 100
self.transitions[-1][0] = reward # Update reward information.
self.stats["max"]["score"].append(score)
if self.no_train_step % self.UPDATE_FREQUENCY == 0:
# Update model
returns, advantages = self._discount_rewards(values)
loss = 0
for transition, ret, advantage in zip(self.transitions, returns, advantages):
reward, indexes_, outputs_, values_ = transition
advantage = advantage.detach() # Block gradients flow here.
probs = F.softmax(outputs_, dim=2)
log_probs = torch.log(probs)
log_action_probs = log_probs.gather(2, indexes_)
policy_loss = (-log_action_probs * advantage).sum()
value_loss = (.5 * (values_ - ret) ** 2.).sum()
entropy = (-probs * log_probs).sum()
loss += policy_loss + 0.5 * value_loss - 0.1 * entropy
self.stats["mean"]["reward"].append(reward)
self.stats["mean"]["policy"].append(policy_loss.item())
self.stats["mean"]["value"].append(value_loss.item())
self.stats["mean"]["entropy"].append(entropy.item())
self.stats["mean"]["confidence"].append(torch.exp(log_action_probs).item())
if self.no_train_step % self.LOG_FREQUENCY == 0:
msg = "{}. ".format(self.no_train_step)
msg += " ".join("{}: {:.3f}".format(k, np.mean(v)) for k, v in self.stats["mean"].items())
msg += " " + " ".join("{}: {}".format(k, np.max(v)) for k, v in self.stats["max"].items())
msg += " vocab: {}".format(len(self.id2word))
print(msg)
self.stats = {"max": defaultdict(list), "mean": defaultdict(list)}
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 40)
self.optimizer.step()
self.optimizer.zero_grad()
self.transitions = []
self.model.reset_hidden(1)
else:
# Keep information about transitions for Truncated Backpropagation Through Time.
self.transitions.append([None, indexes, outputs, values]) # Reward will be set on the next call
if done:
self.last_score = 0 # Will be starting a new episode. Reset the last score.
return action
```
#### Training the neural agent
```
agent = NeuralAgent()
play(agent, "./games/rewardsDense_goalDetailed.ulx")
```
Unsurprisingly, the result is not much different from what the random agent can get.
Let's train the agent for a few episodes.
```
from time import time
agent = NeuralAgent()
print("Training")
agent.train() # Tell the agent it should update its parameters.
starttime = time()
play(agent, "./games/rewardsDense_goalDetailed.ulx", nb_episodes=500, verbose=False) # Dense rewards game.
print("Trained in {:.2f} secs".format(time() - starttime))
```
#### Testing the trained agent
```
# We report the score and steps averaged over 10 playthroughs.
agent.test()
play(agent, "./games/rewardsDense_goalDetailed.ulx") # Dense rewards game.
```
Of course, since we trained on that single simple game, it's not surprising that the agent can achieve a high score on it. It would be more interesting to evaluate the generalization capability of the agent.
To do so, we are going to test the agent on another game drawn from the same game distribution (i.e. same world but the goal is to pick another food item). Let's generate `games/another_game.ulx` with the same rewards density (`--rewards dense`) and the same goal description (`--goal detailed`), but using `--seed 1` and without the `--test` flag (to make sure the game is not part of the test set since `games/rewardsDense_goalDetailed.ulx` is).
```
!tw-make tw-simple --rewards dense --goal detailed --seed 1 --output games/another_game.ulx -v -f
# We report the score and steps averaged over 10 playthroughs.
play(RandomAgent(), "./games/another_game.ulx")
play(agent, "./games/another_game.ulx")
```
As we can see the trained agent barely does better than the random agent. In order to improve the agent's generalization capability, we should train it on many different games drawn from the game distribution.
One could use the following command to easily generate 100 training games:
```
! seq 1 100 | xargs -n1 -P4 tw-make tw-simple --rewards dense --goal detailed --format ulx --output training_games/ --seed
```
Then, we train our agent on that set of training games.
```
from time import time
agent = NeuralAgent()
print("Training on 100 games")
agent.train() # Tell the agent it should update its parameters.
starttime = time()
play(agent, "./training_games/", nb_episodes=100 * 5, verbose=False) # Each game will be seen 5 times.
print("Trained in {:.2f} secs".format(time() - starttime))
```
#### Evaluating the agent on the test distribution
We will generate 20 test games and evaluate the agent on them.
```
! seq 1 20 | xargs -n1 -P4 tw-make tw-simple --rewards dense --goal detailed --test --format ulx --output testing_games/ --seed
agent.test()
play(agent, "./games/rewardsDense_goalDetailed.ulx") # Averaged over 10 playthroughs.
play(agent, "./testing_games/", nb_episodes=20 * 10) # Averaged over 10 playthroughs for each test game.
play(RandomAgent(), "./testing_games/", nb_episodes=20 * 10)
```
While not perfect, the trained agent manages to score more points on average than the random agent.
## Next steps
Here are a few possible directions one can take to improve the agent's performance.
- Adding more training games
- Changing the agent architecture
- Leveraging already trained word embeddings
- Playing more games at once (see [`textworld.gym.make_batch`](https://textworld.readthedocs.io/en/latest/textworld.gym.html#textworld.gym.utils.make_batch))
## Papers about RL applied to text-based games
* [Language Understanding for Text-based games using Deep Reinforcement Learning][narasimhan_et_al_2015]
* [Learning How Not to Act in Text-based Games][haroush_et_al_2017]
* [Deep Reinforcement Learning with a Natural Language Action Space][he_et_al_2015]
* [What can you do with a rock? Affordance extraction via word embeddings][fulda_et_al_2017]
* [Text-based adventures of the Golovin AI Agent][kostka_et_al_2017]
* [Using reinforcement learning to learn how to play text-based games][zelinka_2018]
[narasimhan_et_al_2015]: https://arxiv.org/abs/1506.08941
[haroush_et_al_2017]: https://openreview.net/pdf?id=B1-tVX1Pz
[he_et_al_2015]: https://arxiv.org/abs/1511.04636
[fulda_et_al_2017]: https://arxiv.org/abs/1703.03429
[kostka_et_al_2017]: https://arxiv.org/abs/1705.05637
[zelinka_2018]: https://arxiv.org/abs/1801.01999
|
github_jupyter
|
Bathroom
+
|
+
Bedroom +-(d1)-+ Kitchen +--(d2)--+ Backyard
(P) + +
| |
+ +
Living Room Garden
from typing import Mapping, Any
import numpy as np
import textworld.gym
class RandomAgent(textworld.gym.Agent):
""" Agent that randomly selects a command from the admissible ones. """
def __init__(self, seed=1234):
self.seed = seed
self.rng = np.random.RandomState(self.seed)
@property
def infos_to_request(self) -> textworld.EnvInfos:
return textworld.EnvInfos(admissible_commands=True)
def act(self, obs: str, score: int, done: bool, infos: Mapping[str, Any]) -> str:
return self.rng.choice(infos["admissible_commands"])
import os
from glob import glob
import gym
import textworld.gym
def play(agent, path, max_step=100, nb_episodes=10, verbose=True):
infos_to_request = agent.infos_to_request
infos_to_request.max_score = True # Needed to normalize the scores.
gamefiles = [path]
if os.path.isdir(path):
gamefiles = glob(os.path.join(path, "*.ulx"))
env_id = textworld.gym.register_games(gamefiles,
request_infos=infos_to_request,
max_episode_steps=max_step)
env = gym.make(env_id) # Create a Gym environment to play the text game.
if verbose:
if os.path.isdir(path):
print(os.path.dirname(path), end="")
else:
print(os.path.basename(path), end="")
# Collect some statistics: nb_steps, final reward.
avg_moves, avg_scores, avg_norm_scores = [], [], []
for no_episode in range(nb_episodes):
obs, infos = env.reset() # Start new episode.
score = 0
done = False
nb_moves = 0
while not done:
command = agent.act(obs, score, done, infos)
obs, score, done, infos = env.step(command)
nb_moves += 1
agent.act(obs, score, done, infos) # Let the agent know the game is done.
if verbose:
print(".", end="")
avg_moves.append(nb_moves)
avg_scores.append(score)
avg_norm_scores.append(score / infos["max_score"])
env.close()
msg = " \tavg. steps: {:5.1f}; avg. score: {:4.1f} / {}."
if verbose:
if os.path.isdir(path):
print(msg.format(np.mean(avg_moves), np.mean(avg_norm_scores), 1))
else:
print(msg.format(np.mean(avg_moves), np.mean(avg_scores), infos["max_score"]))
# We report the score and steps averaged over 10 playthroughs.
play(RandomAgent(), "./games/rewardsDense_goalDetailed.ulx") # Dense rewards
play(RandomAgent(), "./games/rewardsBalanced_goalDetailed.ulx") # Balanced rewards
play(RandomAgent(), "./games/rewardsSparse_goalDetailed.ulx") # Sparse rewards
import re
from typing import List, Mapping, Any, Optional
from collections import defaultdict
import numpy as np
import textworld
import textworld.gym
from textworld import EnvInfos
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CommandScorer(nn.Module):
def __init__(self, input_size, hidden_size):
super(CommandScorer, self).__init__()
torch.manual_seed(42) # For reproducibility
self.embedding = nn.Embedding(input_size, hidden_size)
self.encoder_gru = nn.GRU(hidden_size, hidden_size)
self.cmd_encoder_gru = nn.GRU(hidden_size, hidden_size)
self.state_gru = nn.GRU(hidden_size, hidden_size)
self.hidden_size = hidden_size
self.state_hidden = torch.zeros(1, 1, hidden_size, device=device)
self.critic = nn.Linear(hidden_size, 1)
self.att_cmd = nn.Linear(hidden_size * 2, 1)
def forward(self, obs, commands, **kwargs):
input_length = obs.size(0)
batch_size = obs.size(1)
nb_cmds = commands.size(1)
embedded = self.embedding(obs)
encoder_output, encoder_hidden = self.encoder_gru(embedded)
state_output, state_hidden = self.state_gru(encoder_hidden, self.state_hidden)
self.state_hidden = state_hidden
value = self.critic(state_output)
# Attention network over the commands.
cmds_embedding = self.embedding.forward(commands)
_, cmds_encoding_last_states = self.cmd_encoder_gru.forward(cmds_embedding) # 1 x cmds x hidden
# Same observed state for all commands.
cmd_selector_input = torch.stack([state_hidden] * nb_cmds, 2) # 1 x batch x cmds x hidden
# Same command choices for the whole batch.
cmds_encoding_last_states = torch.stack([cmds_encoding_last_states] * batch_size, 1) # 1 x batch x cmds x hidden
# Concatenate the observed state and command encodings.
cmd_selector_input = torch.cat([cmd_selector_input, cmds_encoding_last_states], dim=-1)
# Compute one score per command.
scores = F.relu(self.att_cmd(cmd_selector_input)).squeeze(-1) # 1 x Batch x cmds
probs = F.softmax(scores, dim=2) # 1 x Batch x cmds
index = probs[0].multinomial(num_samples=1).unsqueeze(0) # 1 x batch x indx
return scores, index, value
def reset_hidden(self, batch_size):
self.state_hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)
class NeuralAgent:
""" Simple Neural Agent for playing TextWorld games. """
MAX_VOCAB_SIZE = 1000
UPDATE_FREQUENCY = 10
LOG_FREQUENCY = 1000
GAMMA = 0.9
def __init__(self) -> None:
self._initialized = False
self._epsiode_has_started = False
self.id2word = ["<PAD>", "<UNK>"]
self.word2id = {w: i for i, w in enumerate(self.id2word)}
self.model = CommandScorer(input_size=self.MAX_VOCAB_SIZE, hidden_size=128)
self.optimizer = optim.Adam(self.model.parameters(), 0.00003)
self.mode = "test"
def train(self):
self.mode = "train"
self.stats = {"max": defaultdict(list), "mean": defaultdict(list)}
self.transitions = []
self.model.reset_hidden(1)
self.last_score = 0
self.no_train_step = 0
def test(self):
self.mode = "test"
self.model.reset_hidden(1)
@property
def infos_to_request(self) -> EnvInfos:
return EnvInfos(description=True, inventory=True, admissible_commands=True,
won=True, lost=True)
def _get_word_id(self, word):
if word not in self.word2id:
if len(self.word2id) >= self.MAX_VOCAB_SIZE:
return self.word2id["<UNK>"]
self.id2word.append(word)
self.word2id[word] = len(self.word2id)
return self.word2id[word]
def _tokenize(self, text):
# Simple tokenizer: strip out all non-alphabetic characters.
text = re.sub("[^a-zA-Z0-9\- ]", " ", text)
word_ids = list(map(self._get_word_id, text.split()))
return word_ids
def _process(self, texts):
texts = list(map(self._tokenize, texts))
max_len = max(len(l) for l in texts)
padded = np.ones((len(texts), max_len)) * self.word2id["<PAD>"]
for i, text in enumerate(texts):
padded[i, :len(text)] = text
padded_tensor = torch.from_numpy(padded).type(torch.long).to(device)
padded_tensor = padded_tensor.permute(1, 0) # Batch x Seq => Seq x Batch
return padded_tensor
def _discount_rewards(self, last_values):
returns, advantages = [], []
R = last_values.data
for t in reversed(range(len(self.transitions))):
rewards, _, _, values = self.transitions[t]
R = rewards + self.GAMMA * R
adv = R - values
returns.append(R)
advantages.append(adv)
return returns[::-1], advantages[::-1]
def act(self, obs: str, score: int, done: bool, infos: Mapping[str, Any]) -> Optional[str]:
# Build agent's observation: feedback + look + inventory.
input_ = "{}\n{}\n{}".format(obs, infos["description"], infos["inventory"])
# Tokenize and pad the input and the commands to chose from.
input_tensor = self._process([input_])
commands_tensor = self._process(infos["admissible_commands"])
# Get our next action and value prediction.
outputs, indexes, values = self.model(input_tensor, commands_tensor)
action = infos["admissible_commands"][indexes[0]]
if self.mode == "test":
if done:
self.model.reset_hidden(1)
return action
self.no_train_step += 1
if self.transitions:
reward = score - self.last_score # Reward is the gain/loss in score.
self.last_score = score
if infos["won"]:
reward += 100
if infos["lost"]:
reward -= 100
self.transitions[-1][0] = reward # Update reward information.
self.stats["max"]["score"].append(score)
if self.no_train_step % self.UPDATE_FREQUENCY == 0:
# Update model
returns, advantages = self._discount_rewards(values)
loss = 0
for transition, ret, advantage in zip(self.transitions, returns, advantages):
reward, indexes_, outputs_, values_ = transition
advantage = advantage.detach() # Block gradients flow here.
probs = F.softmax(outputs_, dim=2)
log_probs = torch.log(probs)
log_action_probs = log_probs.gather(2, indexes_)
policy_loss = (-log_action_probs * advantage).sum()
value_loss = (.5 * (values_ - ret) ** 2.).sum()
entropy = (-probs * log_probs).sum()
loss += policy_loss + 0.5 * value_loss - 0.1 * entropy
self.stats["mean"]["reward"].append(reward)
self.stats["mean"]["policy"].append(policy_loss.item())
self.stats["mean"]["value"].append(value_loss.item())
self.stats["mean"]["entropy"].append(entropy.item())
self.stats["mean"]["confidence"].append(torch.exp(log_action_probs).item())
if self.no_train_step % self.LOG_FREQUENCY == 0:
msg = "{}. ".format(self.no_train_step)
msg += " ".join("{}: {:.3f}".format(k, np.mean(v)) for k, v in self.stats["mean"].items())
msg += " " + " ".join("{}: {}".format(k, np.max(v)) for k, v in self.stats["max"].items())
msg += " vocab: {}".format(len(self.id2word))
print(msg)
self.stats = {"max": defaultdict(list), "mean": defaultdict(list)}
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 40)
self.optimizer.step()
self.optimizer.zero_grad()
self.transitions = []
self.model.reset_hidden(1)
else:
# Keep information about transitions for Truncated Backpropagation Through Time.
self.transitions.append([None, indexes, outputs, values]) # Reward will be set on the next call
if done:
self.last_score = 0 # Will be starting a new episode. Reset the last score.
return action
agent = NeuralAgent()
play(agent, "./games/rewardsDense_goalDetailed.ulx")
from time import time
agent = NeuralAgent()
print("Training")
agent.train() # Tell the agent it should update its parameters.
starttime = time()
play(agent, "./games/rewardsDense_goalDetailed.ulx", nb_episodes=500, verbose=False) # Dense rewards game.
print("Trained in {:.2f} secs".format(time() - starttime))
# We report the score and steps averaged over 10 playthroughs.
agent.test()
play(agent, "./games/rewardsDense_goalDetailed.ulx") # Dense rewards game.
!tw-make tw-simple --rewards dense --goal detailed --seed 1 --output games/another_game.ulx -v -f
# We report the score and steps averaged over 10 playthroughs.
play(RandomAgent(), "./games/another_game.ulx")
play(agent, "./games/another_game.ulx")
! seq 1 100 | xargs -n1 -P4 tw-make tw-simple --rewards dense --goal detailed --format ulx --output training_games/ --seed
from time import time
agent = NeuralAgent()
print("Training on 100 games")
agent.train() # Tell the agent it should update its parameters.
starttime = time()
play(agent, "./training_games/", nb_episodes=100 * 5, verbose=False) # Each game will be seen 5 times.
print("Trained in {:.2f} secs".format(time() - starttime))
! seq 1 20 | xargs -n1 -P4 tw-make tw-simple --rewards dense --goal detailed --test --format ulx --output testing_games/ --seed
agent.test()
play(agent, "./games/rewardsDense_goalDetailed.ulx") # Averaged over 10 playthroughs.
play(agent, "./testing_games/", nb_episodes=20 * 10) # Averaged over 10 playthroughs for each test game.
play(RandomAgent(), "./testing_games/", nb_episodes=20 * 10)
| 0.858778 | 0.970882 |
### <p style="text-align: right;"> ✅Jake Volek</p>
# Homework Assignment #1 (Individual)
## Git practice, debugging practice, unfamiliar data, and new Python packages
<img src="https://www.noao.edu/image_gallery/images/d3/02677.jpg" width=300px align="right" style="margin-left: 20px" alt="Image credit: www.noao.edu">
### Goals for this homework assignment
By the end of this assignment, you should be able to:
* Use Git to create a repository, track changes to the files within the repository, and push those changes to a remote repository.
* Debug some Python code.
* Work with an unfamiliar data format and successfully load it into your notebook.
* Visualize FITS image files using Python.
* Read documentation and example code to use a new Python package
* Do a bit of simple image manipulation using Python functions.
Work through the following assignment, making sure to follow all of the directions and answer all of the questions.
There are **72 points** possible on this assignment. Point values for each part are included in the section headers and question prompts.
**This assignment is due roughly two weeks from now at 11:59 pm on Friday, February 12.** It should be uploaded into the "Homework Assignments" submission folder for Homework #1. Submission instructions can be found at the end of the notebook.
---
## Part 1: Setting up a git repository to track your progress on your assignment (6 points)
For this assignment, you're going to create a new **private** GitHub repository that you can use to track your progress on this homework assignment and future assignments. Again, this should be a **private** repository so that your solutions are not publicly accessible.
**✅ Do the following**:
1. On [GitHub](https://github.com) make sure you are logged into your account and then create a new GitHub repository called `cmse202-s21-turnin`.
2. Once you've initialized the repository on GitHub, **clone a copy of it onto JupyterHub or your computer**.
3. Inside the `cmse202-s21-turnin` repository, create a new folder called `hw-01`.
4. Move this notebook into that **new directory** in your repository then **add it and commit it to your repository**. **Important**: you'll want to make sure you **save and close** the notebook before you do this step and then re-open it once you've added it to your repository.
5. Finally, to test that everything is working, `git push` the notebook file so that it shows up in your **private** GitHub repository on the web.
**Important**: Make sure you've added your Professor and your TA as collaborators to your new "turnin" repository with "Read" access so that they can see your assignment. **You should check the Slack channel for your section of the course to get this information.**
**Double-check the following**: Make sure that the version of this notebook that you are working on is the same one that you just added to your repository! If you are working on a different copy of the notebook, **none of your changes will be tracked**.
If everything went as intended, the file should now show up on your GitHub account in the "`cmse202-s21-turnin`" repository inside the `hw-01` directory that you just created. Periodically, **you'll be asked to commit your changes to the repository and push them to the remote GitHub location**. Of course, you can always commit your changes more often than that, if you wish. It can be good to get into a habit of committing your changes any time you make a significant modification, or when you stop working on the project for a bit.
---
## Part 2: Bit of code debugging: reading Python and understanding error messages (6 points)
As a bit of Python practice, review the following code, read the error outputs, and **fix the code**. When you fix the code, **add a comment to explain what was wrong with the original code**.
### Fixing errors
**Question 1 [6 points]**: Resolve the errors in the following pieces of code and add a comment that explains what was wrong in the first place.
```
for i in range(10):
print("The value of i is %i" %i)
#Missing a colon after the for statement.
def compute_fraction(numerator, denominator):
if denominator == 0:
print("Error: Cannot Divide by 0. Enter a new denominator.")
else:
fraction = numerator/denominator
print("The value of the fraction is %f" %fraction)
compute_fraction(5, 0)
#Cannot Divide by 0. Add an error message if denominator is 0.
def compute_fraction(numerator, denominator):
if type(numerator) == str or type(denominator) == str:
print("Cannot do math operations with type string")
else:
fraction = numerator/denominator
print("The value of the fraction is %f" %fraction)
compute_fraction("one", 25)
#Cannot do math operations with strings. Print error message saying must be int or float
import numpy as np
n = np.arange(20)
print("The value of the 10th element is %d" %n[9])
#Use brackets instead of parenthesis.
odd = [1, 3, 5, 7, 9]
even = [2, 4, 6, 8, 10]
for i in odd:
print(i)
for j in even:
print(j)
#Even list was spelled 'evven' in the for statement.
spanish = dict()
spanish['hello'] = 'hola'
spanish['yes'] = 'si'
spanish['one'] = 'uno'
spanish['two'] = 'dos'
spanish['three'] = 'tres'
spanish['red'] = 'rojo'
spanish['black'] = 'negro'
spanish['green'] = 'verde'
spanish['blue'] = 'azul'
spanish['orange'] = 'anaranjado'
print(spanish["hello"])
print(spanish["one"], spanish["two"], spanish["three"])
print(spanish["orange"])
#Orange was not an initialized key in the dictionary.
```
---
### 🛑 STOP
**Pause to commit your changes to your Git repository!**
Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing Part 2", and push the changes to GitHub.
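A minimal sketch of that commit-and-push cycle, run from a notebook cell inside the cloned repository, could look like the following; the notebook filename and branch name are placeholders, not values given in this assignment.
```
# Sketch only: the filename and branch name are placeholders for your own setup.
!git add HW-01.ipynb
!git commit -m "Committing Part 2"
!git push origin main
```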
---
## Part 3: Working with unfamiliar data and a new Python library to create an astronomical image (60 points)
Since we've been practicing downloading data and repositories from the internet and learning to use new Python packages, you're going to practice doing exactly that in this assignment! This will require using the command line a bit (or running command-line commands from inside your notebook), reading documentation, and looking at code examples you're not familiar with. These are all authentic parts of being an independent computational professional.
---
### 3.1: Download the data! (4 points)
For this part of the assignment you're going to need to download a couple data files from the internet. They are relatively small files, so it shouldn't take too long. If you run into issues accessing the files and it seems to be unrelated to the commands you're using, contact your instructor, TA, or LA. Remember, in order to work with the data in this notebook, you'll need to make sure the data is in the same place as the notebook or you'll need to put the full path to the file in your data reading commands.
**DO NOT** commit the data files to your repository! Since you can always download the file again if you're on another machine, it's not necessary to add the file to the repository. In addition, you should be cautious about committing data files to Git repositories because adding large files to a repository means that those large files will have to be downloaded every time you want to clone a new version of the repository, which can be very time-consuming. You should not try to version control large datasets! (Yes, these datasets are fairly small, so you could get away with adding them for this case, but as a rule of thumb, **you should think carefully before you commit data to a repository!**)
The files you need are located here:
`https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/m42_40min_ir.fits`
`https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/m42_40min_red.fits`
**✅ Question 2 [4 points]:** In the cell below, include the command line commands that you used to download the files (you can either run the command on the command line or inside the jupyter notebook using the correct leading character). If you're not sure how to download them using the command line, download them however you need to in order to get them on to your computer and move on to the later parts of the assignment.
```
#!pip install astropy
#Import the needed functions
from astropy.io import fits
import pandas as pd
#Use curl command and fits functions to read the data in
!curl -O https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/m42_40min_ir.fits
ir = fits.open("m42_40min_ir.fits")
ir_data = fits.getdata("m42_40min_ir.fits", ext = 0)
ir_data
```
---
### 3.2: Loading/Reading unfamiliar astronomical data in Python (14 points)
You might notice that the files you downloaded have the extension ".fits". This is likely a file extension that you are not familiar with, and it actually indicates that it is a "FITS" file (clever, right?). What does this mean? Do a quick internet search to figure out what type of file this is and what it is commonly used for.
**✅ Question 3 [2 points]:** Record your findings below. Explain what a FITS file is and what sort of information it is commonly used to store.
FITS stands for Flexible Image Transport System. This type of file does not always contain just image data; it usually stores scientific images together with associated metadata. One example of a FITS file being utilized would be astronomers using one not just to look at a picture, but to examine the underlying data and metadata that go with it.
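As an optional illustration of that image-plus-metadata structure, `astropy.io.fits` can list a file's HDUs and read header keywords; this assumes the `m42_40min_ir.fits` file that gets downloaded later in this notebook.
```
# Optional illustration: peek at the HDUs and header metadata of a FITS file.
from astropy.io import fits

with fits.open("m42_40min_ir.fits") as hdul:
    hdul.info()                          # list the header/data units (HDUs)
    header = hdul[0].header
    print(header['NAXIS1'], header['NAXIS2'])  # image dimensions stored as header metadata
```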
Although you may never have worked with FITS files before, you should have all of the skills necessary to interact with this data in Python. Of course, we haven't actually used FITS files for anything we've done in class. So, your first task is to figure out how to open and read the file using Python.
Time to consult the internet!
**✅ Question 4 [2 points]:** List any/all packages you found that can load FITS files in Python. Which package do you think is the best one to use? If you found more than one package, how did you decide which one to choose?
I used astropy.io.fits in order to open the FITS file. I could not find another package that we could use, but I am sure there are others out there. Using the fits module inside astropy.io, I was able to open and load the data fairly easily and efficiently.
**✅ Question 5 [2 points]:** Is the package already installed on your computer? If so, how did you determine this?
The package was not installed on my computer, and I had to pip install it. I knew it was not installed because when I tried to import the package, I ran into an ImportError, which was an immediate red flag that the package had not yet been installed.
**✅ Question 6 [2 points]:** If the package isn't already installed, put the command to install the package in the cell below. If the package *is* already installed, what command would you have used to install the package?
!pip install astropy
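One way to check whether a package is already installed before trying to use it (a small sketch, not required for the assignment) is to ask Python whether it can locate the package:
```
# Check whether astropy can be found without actually raising an ImportError.
import importlib.util

if importlib.util.find_spec("astropy") is None:
    print("astropy is NOT installed -- run: pip install astropy")
else:
    import astropy
    print("astropy is installed, version", astropy.__version__)
```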
#### Loading the data
The data that you're working with are actually images of the Orion Nebula (M42) and come from the [European Southern Observatory's Digital Sky Survey](http://archive.eso.org/dss/dss) and can be publicly downloaded [here](https://www.spacetelescope.org/projects/fits_liberator/m42data/). The "red" image is from the "$R$" filter of the telescope, which views the sky at red wavelengths, and the "ir" image is from the "$I$" filter, which views the sky at infrared wavelengths. If you're not familiar with the term "infrared," it literally translates to "below red" and indicates that the wavelength of the light is longer than the red part of the [electromagnetic spectrum](https://en.wikipedia.org/wiki/Electromagnetic_spectrum).
**✅ Question 7 [6 points]:** Now that you have a Python package that can open FITS files, **read both files into your notebook and print the mean, standard deviation, maximum, and minimum for each file**.
**Note:** If you can't figure out how to load the data file, use the following two lines of code as a replacement for the real data (you will lose the points for this question, but you'll be able to continue on in the assignment):
``` python
image_data_red = np.random.uniform(0,10000,size=(1000,1000))
image_data_ir = np.random.exponential(600,size=(1000,1000))
```
```
# Read in both the I and R data sets
!curl -O https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/m42_40min_ir.fits
image_data = fits.open("m42_40min_ir.fits")
image_data_ir = fits.getdata("m42_40min_ir.fits", ext = 0)
!curl -O https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/m42_40min_red.fits
image_data_r = fits.open("m42_40min_red.fits")
image_data_red = fits.getdata("m42_40min_red.fits", ext = 0)
import numpy as np
#Print the mean, std, min, and max using the numpy features for both data sets
print("IR")
print("Mean:", round(np.mean(image_data_ir),3))
print("Standard Deviation:", round(np.std(image_data_ir),3))
print("Minimum:", np.min(image_data_ir))
print("Maximum:", np.max(image_data_ir))
print("\nRed")
print("Mean:", round(np.mean(image_data_red),3))
print("Standard Deviation:", round(np.std(image_data_red),3))
print("Minimum:", np.min(image_data_red))
print("Maximum:", np.max(image_data_red))
```
---
### 🛑 STOP
**Pause to commit your changes to your Git repository!**
Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing part 3.2", and push the changes to GitHub.
---
### 3.3: Working with the data (22 points)
Now that you've got the FITS files loaded into Python, it's time to start exploring the data a bit. You've already computed some simple statistics, but now you should take it a step further and try to understand the distribution of pixel values and plot the images.
**✅ Question 8 [6 points]:** Using **NumPy**, compute the histogram for both the $R$ filter image and the $I$ filter image using **50 bins**. You can assume that the values in the images represent **pixel brightness** (you do not need to worry about the units for these values).
*Important note*: When reviewing the documentation for NumPy's histogram function, make sure you know what the properties are of the variables that are returned from the functions!
Once you have your histogram values, **make a plot that contains the histograms for both images showing the pixel count as a function of pixel brightness**. Use the `step()` function in matplotlib to make your plot so that it looks like a more traditional histogram. **Make sure you include appropriate labels on your plot!**
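As a reminder of what `np.histogram` returns (a small sketch on made-up data): it gives back the counts *and* the bin edges, and there is one more edge than there are bins, which is why the plotting code below drops the last edge.
```
# np.histogram returns (counts, bin_edges); with 50 bins there are 51 edges.
import numpy as np

fake_image = np.random.uniform(0, 10000, size=(100, 100))
counts, edges = np.histogram(fake_image, bins=50)
print(counts.shape)  # (50,)
print(edges.shape)   # (51,)
```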
```
# Import the matplotlib package
import matplotlib.pyplot as plt
%matplotlib inline
#https://www.tutorialspoint.com/numpy/numpy_histogram_using_matplotlib.htm np.histogram()
#https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.step.html step()
#np.histogram returns 51 bin edges for 50 bins, so drop the last edge when plotting the counts
vals_ir, bins_ir = np.histogram(image_data_ir, bins = 50)
plt.step(bins_ir[:-1], vals_ir)
vals_red, bins_red = np.histogram(image_data_red, bins = 50)
plt.step(bins_red[:-1], vals_red)
plt.xlabel("Pixel Brigtness")
plt.ylabel("Count")
plt.title("Pixel Brightness for I and R filter")
plt.legend(["I Filter", "R Filter"])
```
**✅ Question 9 [2 points]:** In looking at the histograms, what can you say about the properties of the $R$ filter image and the $I$ filter image? Which one is dominated by a large number of dark pixels? Which one has a nearly uniform, non-negligible pixel count for a wide range of pixel brightness?
We can see from the histogram that the I filter is dominated by low-brightness (dark) pixels, while the R filter has a fairly even number of pixels at each brightness, excluding a peak between 17000 and 20000 in pixel brightness. This means we can expect the I filter image to look much darker, while the R filter image will most likely look brighter.
Now that you have a bit of understanding of what the properties of the images are, let's see if the images themselves match your expectations.
**✅ Question 10 [6 points]:** **Make two separate figures,** one that includes a plot of the $R$ filter and one that includes a plot of the $I$ filter. **Choose a colormap other than the default, but use the same colormap for each image**.
**Make sure you include a colorbar** on the figures so that you can tell what the pixel values are and **ensure that the (0,0) point is in the lower left corner of the image so that the orientation matches that of this image:**
<div align="center"><img src="https://www.noao.edu/image_gallery/images/d3/02677.jpg" width=200px alt="Image credit: www.noao.edu"></div>
```
# https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.imshow.html
#Use a subplot to put the two filters maps with each other.
plt.subplot(2,1,1)
plt.imshow(image_data_ir, cmap = 'YlGn', origin = 'lower')
plt.title("I Filter")
plt.colorbar()
plt.subplot(2,1,2)
plt.imshow(image_data_red, cmap = 'YlGn', origin = 'lower')
plt.title("R Filter")
plt.tight_layout()
plt.colorbar()
```
**✅ Question 11 [2 points]:** Do the resulting images make sense in the context of your histogram plot? Explain why or why not.
This makes sense: from our histogram we see that the R filter was more evenly spread out and had more areas with higher pixel brightness. This shows up in our figures as an abundance of green and very little yellow (green signifies higher values).
Sometimes when astronomers are trying to understand the properties of an object they are looking at, they create "[color-color diagrams](https://en.wikipedia.org/wiki/Color%E2%80%93color_diagram)". These diagrams define "colors" by computing the difference between two different image filters. You don't need to understand the exact details behind color-color diagrams for this part of the assignment, but you're going to use the data you have available to do something similar!
**✅ Question 12 [4 points]:** Write a function that takes in two different image arrays, plots the "difference image," and returns the difference image array. **Test out your function so that you produce an "$R$-$I$" image (you want to give the function the red image and the IR image such that the image that is returned is "red" - "IR" but your function should work for _any two images_.**
```
# Define the function name and parameters
def diff_image(arr1, arr2):
#Calculate the difference of the two arrays
final_array = arr2 - arr1
    #Plot the difference array with a color bar
plt.imshow(final_array, origin = 'lower', cmap = 'YlGn')
plt.colorbar()
#Return the difference array
return final_array
diff_image(image_data_ir, image_data_red)
```
**✅ Question 13 [2 points]:** What can you learn from your difference image? Which part(s) of the image is(are) brighter in the $I$ filter than in the $R$ filter?
From our difference image, we can see that the I filter had much brighter pixels in the middle of our figure. This is shown by the light yellow region, which maps to around -10000 on our colorbar (a negative "red minus IR" value means the IR image is brighter there).
---
### 🛑 STOP
**Pause to commit your changes to your Git repository!**
Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing part 3.3", and push the changes to GitHub.
---
### 3.4: Using a specialized package for visualization of data (6 points)
Now that you've spent some time exploring the nature of these images, we're going to try to use some of the tools that are unique to the package you've been using to read the FITS files into your notebook. In particular, we're going to try to use the header information associated with the FITS files to make a plot that uses the "World Coordinate System" so that instead of just plotting the image dimensions in terms of pixel position, we'll have a plot where "Right Ascension" is on the $x$-axis and "Declination" is on the $y$-axis. These coordinates are what astronomers use to navigate the sky.
**✅ Question 14 [6 points]:** Using the documentation page for the new Python package you've been using, or any other examples you can find on the internet, **make a plot using the World Coordinate System** for the $I$ filter image. The package will use the information from the header of the FITS file to define a set of axes that correspond to the physical coordinates of the image. If all goes well, you should end up with something that looks like this:
<div align="center"><img src="https://i.ibb.co/k0sCXcf/m42-ir-RADEC.png" alt="m42-ir-RADEC" border="0"></div>
**Important note:** You may end up getting some WARNINGs in your notebook when you do this step, but you should be able to safely ignore those. However, if you run into actual errors, you need to troubleshoot those!
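If the warnings clutter your notebook, one way to silence just the astropy-specific ones (a sketch, assuming the warnings are instances of `AstropyWarning`) is:
```
# Silence astropy-specific warnings (e.g., about non-standard FITS header keywords).
import warnings
from astropy.utils.exceptions import AstropyWarning

warnings.simplefilter("ignore", category=AstropyWarning)
```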
```
#https://docs.astropy.org/en/stable/visualization/wcsaxes/
# Import needed modules and read the file in
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
ir_data = fits.open("https://raw.githubusercontent.com/msu-cmse-courses/cmse202-S21-student/master/data/m42_40min_ir.fits")[0]
#Set a world coordinate system
wcs = WCS(ir_data.header)
#Plot the WCS from the I data.
plt.subplot(projection=wcs)
plt.imshow(ir_data.data, origin='lower')
plt.xlabel("Right Ascension")
plt.ylabel("Declintation")
cb = plt.colorbar()
cb.set_label("Brightness")
```
---
### 🛑 STOP
**Pause to commit your changes to your Git repository!**
Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing part 3.4", and push the changes to GitHub.
---
### 3.5: Writing Python functions for doing image manipulation and processing (14 points)
Now that you've been able to read, manipulate, and display FITS file images, we're going to work on building some Python functions to interact with these files and do some very simple image processing.
**✅ Question 15 [10 points]:** In order to simplify the process of "observing" the nebula that we've been looking at thus far, you're going to build the following functions:
1. A `load_images` function that takes two image filenames as inputs, loads the corresponding FITS files, and returns a **dictionary** where the keys in the dictionary are the filenames and the entries in the dictionary are the corresponding image arrays.
2. A `calc_stats` function that takes a dictionary of image information (like the one returned by your `load_images` function) as input and **prints the mean and standard deviation of all images in the dictionary**. Make sure that the print statements indicate which image the values correspond to by using the filenames that are stored in the dictionary.
3. A `make_composite` function that takes your two filenames and your dictionary of image information as input and creates a 3D NumPy array that represents a 2D image and its corresponding "R", "G", and "B" values. The Red (R), Green (G), and Blue (B) channels should be defined in the following ways:
1. The red channel should be defined as
$$ 1.5 \times \frac{\mathrm{I~filter~image~array}}{\mathrm{The~maximum~of~the~R~filter~image~array}}$$
    2. The green channel should be based on the average pixel values, specifically defined as
$$ \frac{\mathrm{(I~filter~image~array + R~filter~image~array)/2}}{\mathrm{The~maximum~of~the~R~filter~image~array}}$$
3. The blue channel should be defined as
$$ \frac{\mathrm{R~filter~image~array}}{\mathrm{The~maximum~of~the~R~filter~image~array}}$$
When this function is called it should **display the "false color" image you've created by using `plt.imshow()`**
**A starter function and the code for creating the red channel have been provided for you for the `make_composite` function!**
For the `make_composite` function, you may run into issues with some of your image data values not being of the correct type to do some of the math necessary to make the composite image, so you may need to convert some of the values to the appropriate type. Also, make sure you understand what the provided code is doing, especially when it comes to "clipping" the RGB values!
```
#Define the load images function and parameters
def load_images(f1, f2):
#Get both data sets
data1 = fits.getdata(f1, ext = 0)
data2 = fits.getdata(f2, ext = 0)
#Create a dictionary that takes file name as keys and data arrays as values
dictionary = {}
dictionary[f1] = data1
dictionary[f2] = data2
return dictionary
#Define calc_stats function
def calc_stats(im_dict):
    #Go through each key (filename) in the dictionary
    for f in im_dict:
        #Print the mean and std for each image, labeled by its filename and rounded to 3 decimals
        print(f"{f} -- Mean:", round(np.mean(im_dict[f]), 3))
        print(f"{f} -- Standard Deviation:", round(np.std(im_dict[f]), 3))
# Here is a starting point for the "make_composite" function
def make_composite(f1, f2, im_dict):
'''
This function takes in the following:
f1 : file name for the "R" filter image
f2 : file name for the "I" filter image
im_dict : a dictionary that contains the image arrays as entries that match the file names
'''
# Define the array for storing RGB values
rgb = np.zeros((im_dict[f1].shape[0],im_dict[f1].shape[1],3))
# Define a normalization factor for our denominator using the R filter image
norm_factor = im_dict[f1].astype("float").max()
# Compute the red channel values and then clip them to ensure nothing is > 1.0
rgb[:,:,0] = (im_dict[f2].astype("float")/norm_factor) * 1.5
rgb[:,:,0][rgb[:,:,0] > 1.0] = 1.0
#Compute the green channel and make sure nothing is over 1.0
rgb[:,:,1] = ((im_dict[f2].astype("float") + im_dict[f1].astype("float"))/2) / norm_factor
rgb[:,:,1][rgb[:,:,1] > 1.0] = 1.0
#Compute the blue channel and make sure nothing is over 1.0
rgb[:,:,2] = (im_dict[f1].astype("float")/norm_factor)
rgb[:,:,2][rgb[:,:,2] > 1.0] = 1.0
    #Plot the rgb values (imshow ignores the cmap argument for RGB arrays, so it is omitted here)
    plt.imshow(rgb, origin='lower')
```
**✅ Question 16 [4 points]:** Now that you've defined your functions, you're going to put them to use. In the following cell:
1. Load the images using your `load_images()` function.
2. Compute the basic image statistics by calling the `calc_stats` function.
3. Create a false color image using the `make_composite` function.
* If all goes well, you should end up with a composite image that looks something like this:
<div align="center"><img src="https://i.ibb.co/X2mK6qN/m42-falsecolor-final.png" alt="m42-falsecolor-final" border="0"></div>
**Important note:** It is not required that your final composite image has the Right Ascension and Declination coordinates, but if you figured out how to do this in the previous section, I encourage you to include it!
**Another important note**: If you never managed to get the FITS file data loaded in, you can use the fake data image arrays from previously:
``` python
image_data_red = np.random.uniform(0,10000,size=(1000,1000))
image_data_ir = np.random.exponential(600,size=(1000,1000))
```
```
#Read in the data sets with load_images function
new_dict = load_images("m42_40min_ir.fits", "m42_40min_red.fits")
#Print the stats for each data set in dictionary
calc_stats(new_dict)
#Adjust the RGB values and plot the result using make composite function.
make_composite( "m42_40min_red.fits", "m42_40min_ir.fits", new_dict)
```
---
### 🛑 STOP
**Pause to commit your changes to your Git repository!**
Take a moment to save your notebook, commit the changes to your Git repository using the commit message "Committing part 3.5", and push the changes to GitHub.
---
---
## Assignment wrap-up
Please fill out the form that appears when you run the code below. **You must completely fill this out in order to receive credit for the assignment!**
```
from IPython.display import HTML
HTML(
"""
<iframe
src="https://forms.office.com/Pages/ResponsePage.aspx?id=MHEXIi9k2UGSEXQjetVofddd5T-Pwn1DlT6_yoCyuCFUMVlDR0FZWllFS0NEUUc3V1NZVEZUUjRPWC4u"
width="800px"
height="600px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
```
### Congratulations, you're done!
Submit this assignment by uploading it to the course Desire2Learn web page. Go to the "Homework Assignments" folder, find the dropbox link for Homework #1, and upload it there.
© Copyright 2020, Department of Computational Mathematics, Science and Engineering at Michigan State University
---
```
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results
lminfo = session.query(Measurement.date).\
order_by(Measurement.date.desc()).first()
lminfo=list(np.ravel(lminfo))[0]
print(lminfo)
# Calculate the date 1 year ago from the last data point in the database
yearago=dt.date(2017, 8, 23)- dt.timedelta(days=365)
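# Alternative (a sketch): derive the cutoff from the query result above instead of
# hard-coding it, assuming lminfo is a 'YYYY-MM-DD' string as printed above.
# yearago = dt.datetime.strptime(lminfo, "%Y-%m-%d").date() - dt.timedelta(days=365)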
# Perform a query to retrieve the data and precipitation scores
prec= session.query(Measurement.date, Measurement.prcp)\
.filter(Measurement.date>yearago)\
.order_by(Measurement.date).all()
# Save the query results as a Pandas DataFrame and set the index to the date column
precTable=pd.DataFrame(prec)
precTable=precTable.set_index('date')
# Sort the dataframe by date
precTable=precTable.sort_index(ascending=True)
# Use Pandas Plotting with Matplotlib to plot the data
precTable.plot(title="Precipitation")
# Use Pandas to calculate the summary statistics for the precipitation data
precTable.describe()
# Design a query to show how many stations are available in this dataset?
stations=session.query(Station).count()
print(f"There are {stations} stations")
# What are the most active stations? (i.e. what stations have the
#most rows)?
# List the stations and the counts in descending order.
activestations=session.query(Measurement.station, func.count(Measurement.station))\
.group_by(Measurement.station)\
.order_by(func.count(Measurement.station).desc()).all()
activestations
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station:
ID=activestations[0][0]
#most active station
SName=session.query(Station.name).filter_by(station = ID)
SName=SName[0][0]
print (f"{SName} is the most active station")
#the lowest temperature recorded,
lowT=session.query(Measurement.tobs).filter(Measurement.station == ID)\
.order_by(Measurement.tobs.asc()).first()
lowT=lowT[0]
print (f"The lowest temperature is {lowT}")
#highest temperature recorded,
highT=session.query(Measurement.tobs).filter(Measurement.station == ID)\
.order_by(Measurement.tobs.desc()).first()
highT=highT[0]
print (f"The highest temperature is {highT}")
#average temperature
avT=session.query(func.avg(Measurement.tobs))\
.filter(Measurement.station == ID)
avT=avT[0][0]
print (f"The average temperature is {avT}")
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
HObs = session.query(Measurement.date, Measurement.tobs)\
.filter (Measurement.date>yearago)\
.filter (Measurement.station == ID)\
.order_by (Measurement.date)\
.all()
HObsDF=pd.DataFrame(HObs)
plt.hist(HObsDF['tobs'],12)
plt.xlabel("Temperature")
plt.ylabel("Observations")
plt.title("12 months of temperature observation")
plt.show()
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
        TMIN, TAVG, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate
#the tmin, tavg, and tmax for your trip using the
#previous year's data for those same dates.
# function usage example
TripR=calc_temps('2011-02-28', '2011-03-05')
TripR
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
TripDF=pd.DataFrame(TripR, columns=['Min', 'Avg', 'Max'])
AvgT=TripDF['Avg']
MMT=TripDF.iloc[0]['Max'] - TripDF.iloc[0]['Min']
AvgT.plot(kind='bar', yerr=MMT, figsize=(6,8), alpha=0.5)
plt.title("Trip Avg Temp")
plt.ylabel("T")
plt.xticks([])
plt.grid()
plt.show()
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
TotalRain = session.query(Station.station, func.sum(Measurement.prcp),\
Station.name, Station.elevation, Station.latitude,\
Station.longitude).filter(Measurement.station == Station.station)\
.filter(Measurement.date >= '2011-02-28')\
.filter(Measurement.date <= '2011-03-05')\
.group_by(Station.station).order_by(func.sum(Measurement.prcp).desc())
for row in TotalRain:
print (f"Station: {row[0]}, Rainfall: {row[1]}, Location: {row[2]}, Elevation: {row[3]}, Latitude: {row[4]}, Longitude: {row[5]}")
print()
```
## Optional Challenge Assignment
```
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
# Use the start and end date to create a range of dates
# Strip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
# Plot the daily normals as an area plot with `stacked=False`
```
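The comments above outline the remaining steps; a minimal sketch of how they could be implemented (assuming a hypothetical one-week trip and the `daily_normals` function defined above) might look like this:
```
# Sketch of the optional challenge: daily normals for an assumed trip window.
trip_start = dt.date(2018, 2, 28)   # assumed trip start date
trip_end = dt.date(2018, 3, 5)      # assumed trip end date
trip_dates = pd.date_range(trip_start, trip_end)

# Strip off the year and keep a list of %m-%d strings
month_days = trip_dates.strftime("%m-%d")

# Calculate the normals for each date; each call returns a single tuple inside a list
normals = [daily_normals(md)[0] for md in month_days]

# Load the results into a DataFrame indexed by the trip dates
normals_df = pd.DataFrame(normals, columns=["tmin", "tavg", "tmax"], index=trip_dates)
normals_df.index.name = "date"

# Plot the daily normals as an unstacked area plot
normals_df.plot(kind="area", stacked=False, alpha=0.3)
plt.show()
```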
---
```
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from pathlib import Path
from collections import Counter
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
```
# Read the CSV and Perform Basic Data Cleaning
```
# https://help.lendingclub.com/hc/en-us/articles/215488038-What-do-the-different-Note-statuses-mean-
columns = [
"loan_amnt", "int_rate", "installment", "home_ownership",
"annual_inc", "verification_status", "issue_d", "loan_status",
"pymnt_plan", "dti", "delinq_2yrs", "inq_last_6mths",
"open_acc", "pub_rec", "revol_bal", "total_acc",
"initial_list_status", "out_prncp", "out_prncp_inv", "total_pymnt",
"total_pymnt_inv", "total_rec_prncp", "total_rec_int", "total_rec_late_fee",
"recoveries", "collection_recovery_fee", "last_pymnt_amnt", "next_pymnt_d",
"collections_12_mths_ex_med", "policy_code", "application_type", "acc_now_delinq",
"tot_coll_amt", "tot_cur_bal", "open_acc_6m", "open_act_il",
"open_il_12m", "open_il_24m", "mths_since_rcnt_il", "total_bal_il",
"il_util", "open_rv_12m", "open_rv_24m", "max_bal_bc",
"all_util", "total_rev_hi_lim", "inq_fi", "total_cu_tl",
"inq_last_12m", "acc_open_past_24mths", "avg_cur_bal", "bc_open_to_buy",
"bc_util", "chargeoff_within_12_mths", "delinq_amnt", "mo_sin_old_il_acct",
"mo_sin_old_rev_tl_op", "mo_sin_rcnt_rev_tl_op", "mo_sin_rcnt_tl", "mort_acc",
"mths_since_recent_bc", "mths_since_recent_inq", "num_accts_ever_120_pd", "num_actv_bc_tl",
"num_actv_rev_tl", "num_bc_sats", "num_bc_tl", "num_il_tl",
"num_op_rev_tl", "num_rev_accts", "num_rev_tl_bal_gt_0",
"num_sats", "num_tl_120dpd_2m", "num_tl_30dpd", "num_tl_90g_dpd_24m",
"num_tl_op_past_12m", "pct_tl_nvr_dlq", "percent_bc_gt_75", "pub_rec_bankruptcies",
"tax_liens", "tot_hi_cred_lim", "total_bal_ex_mort", "total_bc_limit",
"total_il_high_credit_limit", "hardship_flag", "debt_settlement_flag"
]
target = ["loan_status"]
# Load the data
file_path = Path('../Resources/LoanStats_2019Q1.csv.zip')
df = pd.read_csv(file_path, skiprows=1)[:-2]
df = df.loc[:, columns].copy()
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
# Remove the `Issued` loan status
issued_mask = df['loan_status'] != 'Issued'
df = df.loc[issued_mask]
# convert interest rate to numerical
df['int_rate'] = df['int_rate'].str.replace('%', '')
df['int_rate'] = df['int_rate'].astype('float') / 100
# Convert the target column values to low_risk and high_risk based on their values
x = {'Current': 'low_risk'}
df = df.replace(x)
x = dict.fromkeys(['Late (31-120 days)', 'Late (16-30 days)', 'Default', 'In Grace Period'], 'high_risk')
df = df.replace(x)
df.reset_index(inplace=True, drop=True)
df.head()
```
# Split the Data into Training and Testing
```
# Create our features
X = df.drop(columns='loan_status')
X = pd.get_dummies(X)
# Create our target
y = df.loc[:, target].copy()
X.describe()
# Check the balance of our target values
y['loan_status'].value_counts()
# Split the X and y into X_train, X_test, y_train, y_test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
```
# Ensemble Learners
In this section, you will compare two ensemble algorithms to determine which algorithm results in the best performance. You will train a Balanced Random Forest Classifier and an Easy Ensemble AdaBoost classifier. For each algorithm, be sure to complete the following steps:
1. Train the model using the training data.
2. Calculate the balanced accuracy score from sklearn.metrics.
3. Print the confusion matrix from sklearn.metrics.
4. Generate a classification report using the `classification_report_imbalanced` function from imbalanced-learn.
5. For the Balanced Random Forest Classifier only, print the feature importance sorted in descending order (most important feature to least important) along with the feature score.
Note: Use a random state of 1 for each algorithm to ensure consistency between tests
### Balanced Random Forest Classifier
```
# Train a Balanced Random Forest Classifier (it balances each bootstrap sample by undersampling the majority class)
from imblearn.ensemble import BalancedRandomForestClassifier
brf = BalancedRandomForestClassifier(n_estimators=100, random_state=1)
brf.fit(X_train, y_train)
# Calculate the balanced accuracy score
y_pred = brf.predict(X_test)
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
# List the features sorted in descending order by feature importance
importances = brf.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X.shape[1]):
print(f"{X.columns.values[f]}: ({importances[indices[f]]})")
```
### Easy Ensemble AdaBoost Classifier
```
# Train the Classifier
from imblearn.ensemble import EasyEnsembleClassifier
eec = EasyEnsembleClassifier(n_estimators=100, random_state=1)
eec.fit(X_train, y_train)
# Calculate the balanced accuracy score
y_pred = eec.predict(X_test)
balanced_accuracy_score(y_test, y_pred)
# Display the confusion matrix
confusion_matrix(y_test, y_pred)
# Print the imbalanced classification report
print(classification_report_imbalanced(y_test, y_pred))
```
---
```
# Importing packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import re
from functools import reduce
from collections import defaultdict
from sklearn.impute import KNNImputer
from imblearn.over_sampling import SMOTE
from sklearn.pipeline import Pipeline
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.decomposition import TruncatedSVD
from sklearn.cross_decomposition import PLSSVD
from sklearn.compose import ColumnTransformer
# displaying all the rows and columns
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# setting random seed:
RANDOM_SEED = 42
```
## 1. EDA
```
# Load the data:
data = pd.read_csv('/Users/bangxixiao/Desktop/python_projects/DATA1030_MIDTERM_PROJECT/data/application_data.csv',
encoding='UTF-8')
print('The credit default data has {} rows and {} columns.'.format(data.shape[0], data.shape[1]))
data.info()
```
The data has 307511 entries and 122 columns, where 65 columns are identified as float, 41 columns as integer and 16 columns as object (string).
```
data.head()
```
We noticed that there exists a small proportion of missing data.
```
# describe the data:
data.describe()
```
The columns can be separated into different categories by looking at the prefix of the column names:
- `AMT`: data related to specific amounts, such as the loan annuity, the annuity of the previous application, and so on
- `APARTMENTS`: apartment-related information
- `BASEMENTAREA`: basement information about the building where the client lives
- `CHANEL`: through which channel we acquired the client on the previous application
- `CNT`: count data, such as the number of children the client has
- `CODE`: includes gender and why the previous application was rejected
- `COMMONAREA`: common-area information about the building where the client lives
- `DAYS`: date-related information, such as birth and employment dates
- `DEF`: counts of the client's social surroundings
- `ELEVATORS`: living condition of the client
- `EMERGENCYSTATE`: living condition of the client
- `ENTRANCES`: living condition of the client
- `EXT`: normalized score from an external data source
- `FLAG`: consists of bool values
- `FLOORSMAX`/`FLOORSMIN`: living condition of the client
- `HOUR`: time-related data, for example the exact hour the client applied for the loan
- `LANDAREA`: living condition of the client
- `LIVE`: whether the client's contact address matches the work address
- `LIVINGAPARTMENTS`/`LIVINGAREA`: living condition of the client
- `NAME`: mostly string data regarding the client's personal information
- `NFLAG`: bool-like data
- `NONLIVINGAPARTMENT`/`NONLIVINGAREA`: living condition of the client
- `OBS`: number of observations of the client's social surroundings
- `OCCUPATION`: kind of occupation the client has
- `ORGANIZATION`: type of organization where the client works
- `OWN`: age of the client's car
- `PRODUCT`: detailed product combination of the previous application
- `RATE`: fractional numbers
- `REG`: bool-type data regarding the match of the client's permanent address
- `SELLERPLACE`: selling area of the seller place of the previous application
- `SK_`: ID-like data
- `TARGET`: target variable
- `TOTALAREA`: living condition of the client
- `WALLSMATERIAL`: living condition of the client
- `WEEKDAY`: which day of the week the client applied for the loan
- `YEARS`: living condition of the client
```
# classify columns to different fields mentioned above:
def get_columns_collection(d):
col_prefix = [x.split('_')[0] for x in d.columns]
col_collection = defaultdict(list)
for cp, c in zip(col_prefix, d.columns):
col_collection[cp].append(c)
return col_collection
# investigating the missing values:
data.isnull().sum().sort_values(ascending=False)
# percentage of missing:
(data.isnull().sum()/len(data)*100).sort_values(ascending=False)
```
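The `get_columns_collection` helper defined above is never actually called in the notebook; a minimal usage sketch (the chosen prefixes are just examples):
```
# Group the column names by prefix and peek at two of the groups
col_collection = get_columns_collection(data)
for prefix in ['AMT', 'FLAG']:
    print(prefix, '->', col_collection[prefix][:5])
```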
Notice that multiple columns have missing proportions greater than 0. To deal with this, we directly remove the features with more than 50% missing values. For the features with missing proportions between 0% and 50%, we examine them one by one and decide whether to keep them (with some imputation) or delete them.
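A minimal sketch of the 50% screen described above (inspection only — the notebook keeps an explicit feature list further below rather than dropping columns in place):
```
# Identify the features whose missing proportion exceeds 50%
missing_frac = data.isnull().mean()
high_missing_cols = missing_frac[missing_frac > 0.5].index.tolist()
print('{} columns have more than 50% missing values.'.format(len(high_missing_cols)))
```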
```
replace_dict = {
"Commercial associate" : "Working",
"State servant" : "Working",
"Maternity leave" : "Working",
"Pensioner" : "Retired"
}
data['NAME_INCOME_TYPE'].replace(replace_dict, inplace=True)
replace_dict = {
"Secondary / secondary special" : "Secondary_Education",
"Higher education" : "Higher_Education",
"Incomplete higher" : "Secondary_Education",
"Lower secondary" : "Lower_Secondary",
"Academic degree" : "Higher Education"
}
data['NAME_EDUCATION_TYPE'].replace(replace_dict, inplace=True)
replace_dict = {
"Single / not married" : "Single",
"Civil marriage" : "Married",
"Separated" : "Married",
"Widow" : "Married",
"Unknown" : "Married"
}
data['NAME_FAMILY_STATUS'].replace(replace_dict, inplace=True)
replace_dict = {
"House / apartment" : "Owned",
"With parents" : "Owned",
"Municipal apartment" : "Freehold",
"Office apartment" : "Freehold",
"Rented apartment" : "Rented",
"Co-op" : "Rented"
}
data['NAME_HOUSING_TYPE'].replace(replace_dict, inplace=True)
data['EXT_SOURCE_3'].fillna(data['EXT_SOURCE_3'].median(), inplace=True)
flag_doc_cols = ['FLAG_DOCUMENT_{}'.format(x) for x in range(2, 22)]
data['CNT_DOCUMENT'] = data[flag_doc_cols].sum(axis=1)
data.drop(columns=flag_doc_cols, inplace=True)
data['CODE_GENDER'].replace({"M": 1, "F": 0, "XNA": 1}, inplace=True)
data['CODE_GENDER'] = data['CODE_GENDER'].astype('int64')
data['DAYS_BIRTH'] = data['DAYS_BIRTH'].apply(lambda x: int(x * (-1) / 365))
data['DAYS_EMPLOYED'] = data['DAYS_EMPLOYED'].apply(lambda x: int(x * (-1) / 365))
ORDINAL_FEATURES = ['REGION_RATING_CLIENT',
'REGION_RATING_CLIENT_W_CITY',
'HOUR_APPR_PROCESS_START',
'WEEKDAY_APPR_PROCESS_START']
CONTINUOUS_FEATURES = ['FLOORSMAX_MEDI',
'TOTALAREA_MODE',
'OWN_CAR_AGE',
'APARTMENTS_MEDI',
'BASEMENTAREA_MEDI',
'YEARS_BEGINEXPLUATATION_MEDI',
'YEARS_BUILD_MEDI',
'COMMONAREA_MEDI',
'ELEVATORS_MEDI',
'ENTRANCES_MEDI',
'FLOORSMIN_MEDI',
'LANDAREA_MEDI',
'LIVINGAPARTMENTS_MEDI',
'LIVINGAREA_MEDI',
'NONLIVINGAPARTMENTS_MEDI',
'NONLIVINGAREA_MEDI',
'EXT_SOURCE_1',
'EXT_SOURCE_2',
'EXT_SOURCE_3',
'AMT_INCOME_TOTAL',
'AMT_CREDIT',
'AMT_ANNUITY',
'AMT_GOODS_PRICE',
'AMT_REQ_CREDIT_BUREAU_MON',
'AMT_REQ_CREDIT_BUREAU_DAY',
'AMT_REQ_CREDIT_BUREAU_WEEK',
'AMT_REQ_CREDIT_BUREAU_QRT',
'AMT_REQ_CREDIT_BUREAU_HOUR',
'OBS_30_CNT_SOCIAL_CIRCLE',
'DEF_30_CNT_SOCIAL_CIRCLE',
'OBS_60_CNT_SOCIAL_CIRCLE',
'DEF_60_CNT_SOCIAL_CIRCLE',
'CNT_CHILDREN',
'CNT_FAM_MEMBERS',
'CNT_DOCUMENT',
'DAYS_BIRTH',
'DAYS_EMPLOYED',
'DAYS_REGISTRATION',
'DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE',
'REGION_POPULATION_RELATIVE']
CATEGORICAL_FEATURES = ['EMERGENCYSTATE_MODE',
'NAME_CONTRACT_TYPE',
'NAME_INCOME_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_FAMILY_STATUS',
'NAME_HOUSING_TYPE',
'FLAG_OWN_CAR',
'FLAG_OWN_REALTY',
'FLAG_EMP_PHONE',
'FLAG_WORK_PHONE',
'FLAG_PHONE',
'FLAG_EMAIL',
'CODE_GENDER',
'ORGANIZATION_TYPE',
'LIVE_REGION_NOT_WORK_REGION',
'LIVE_CITY_NOT_WORK_CITY',
'REG_REGION_NOT_LIVE_REGION',
'REG_REGION_NOT_WORK_REGION',
'REG_CITY_NOT_LIVE_CITY',
'REG_CITY_NOT_WORK_CITY']
data[CONTINUOUS_FEATURES + CATEGORICAL_FEATURES + ORDINAL_FEATURES + ['TARGET']].to_csv(
'/Users/bangxixiao/Desktop/python_projects/DATA1030_MIDTERM_PROJECT/data/data_unimputed_2.csv',
index=False
)
```
|
github_jupyter
|
# Importing packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import re
from functools import reduce
from collections import defaultdict
from sklearn.impute import KNNImputer
from imblearn.over_sampling import SMOTE
from sklearn.pipeline import Pipeline
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.decomposition import TruncatedSVD
from sklearn.cross_decomposition import PLSSVD
from sklearn.compose import ColumnTransformer
# displaying all the rows and columns
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# setting random seed:
RANDOM_SEED = 42
# Load the data:
data = pd.read_csv('/Users/bangxixiao/Desktop/python_projects/DATA1030_MIDTERM_PROJECT/data/application_data.csv',
encoding='UTF-8')
print('The credit default data has {} rows and {} columns.'.format(data.shape[0], data.shape[1]))
data.info()
data.head()
# describe the data:
data.describe()
# classify columns to different fields mentioned above:
def get_columns_collection(d):
col_prefix = [x.split('_')[0] for x in d.columns]
col_collection = defaultdict(list)
for cp, c in zip(col_prefix, d.columns):
col_collection[cp].append(c)
return col_collection
# investigating the missing values:
data.isnull().sum().sort_values(ascending=False)
# percentage of missing:
(data.isnull().sum()/len(data)*100).sort_values(ascending=False)
replace_dict = {
"Commercial associate" : "Working",
"State servant" : "Working",
"Maternity leave" : "Working",
"Pensioner" : "Retired"
}
data['NAME_INCOME_TYPE'].replace(replace_dict, inplace=True)
replace_dict = {
"Secondary / secondary special" : "Secondary_Education",
"Higher education" : "Higher_Education",
"Incomplete higher" : "Secondary_Education",
"Lower secondary" : "Lower_Secondary",
"Academic degree" : "Higher Education"
}
data['NAME_EDUCATION_TYPE'].replace(replace_dict, inplace=True)
replace_dict = {
"Single / not married" : "Single",
"Civil marriage" : "Married",
"Separated" : "Married",
"Widow" : "Married",
"Unknown" : "Married"
}
data['NAME_FAMILY_STATUS'].replace(replace_dict, inplace=True)
replace_dict = {
"House / apartment" : "Owned",
"With parents" : "Owned",
"Municipal apartment" : "Freehold",
"Office apartment" : "Freehold",
"Rented apartment" : "Rented",
"Co-op" : "Rented"
}
data['NAME_HOUSING_TYPE'].replace(replace_dict, inplace=True)
data['EXT_SOURCE_3'].fillna(data['EXT_SOURCE_3'].median(), inplace=True)
flag_doc_cols = ['FLAG_DOCUMENT_{}'.format(x) for x in range(2, 22)]
data['CNT_DOCUMENT'] = data[flag_doc_cols].sum(axis=1)
data.drop(columns=flag_doc_cols, inplace=True)
data['CODE_GENDER'].replace({"M": 1, "F": 0, "XNA": 1}, inplace=True)
data['CODE_GENDER'] = data['CODE_GENDER'].astype('int64')
data['DAYS_BIRTH'] = data['DAYS_BIRTH'].apply(lambda x: int(x * (-1) / 365))
data['DAYS_EMPLOYED'] = data['DAYS_EMPLOYED'].apply(lambda x: int(x * (-1) / 365))
ORDINAL_FEATURES = ['REGION_RATING_CLIENT',
'REGION_RATING_CLIENT_W_CITY',
'HOUR_APPR_PROCESS_START',
'WEEKDAY_APPR_PROCESS_START']
CONTINUOUS_FEATURES = ['FLOORSMAX_MEDI',
'TOTALAREA_MODE',
'OWN_CAR_AGE',
'APARTMENTS_MEDI',
'BASEMENTAREA_MEDI',
'YEARS_BEGINEXPLUATATION_MEDI',
'YEARS_BUILD_MEDI',
'COMMONAREA_MEDI',
'ELEVATORS_MEDI',
'ENTRANCES_MEDI',
'FLOORSMIN_MEDI',
'LANDAREA_MEDI',
'LIVINGAPARTMENTS_MEDI',
'LIVINGAREA_MEDI',
'NONLIVINGAPARTMENTS_MEDI',
'NONLIVINGAREA_MEDI',
'EXT_SOURCE_1',
'EXT_SOURCE_2',
'EXT_SOURCE_3',
'AMT_INCOME_TOTAL',
'AMT_CREDIT',
'AMT_ANNUITY',
'AMT_GOODS_PRICE',
'AMT_REQ_CREDIT_BUREAU_MON',
'AMT_REQ_CREDIT_BUREAU_DAY',
'AMT_REQ_CREDIT_BUREAU_WEEK',
'AMT_REQ_CREDIT_BUREAU_QRT',
'AMT_REQ_CREDIT_BUREAU_HOUR',
'OBS_30_CNT_SOCIAL_CIRCLE',
'DEF_30_CNT_SOCIAL_CIRCLE',
'OBS_60_CNT_SOCIAL_CIRCLE',
'DEF_60_CNT_SOCIAL_CIRCLE',
'CNT_CHILDREN',
'CNT_FAM_MEMBERS',
'CNT_DOCUMENT',
'DAYS_BIRTH',
'DAYS_EMPLOYED',
'DAYS_REGISTRATION',
'DAYS_ID_PUBLISH',
'DAYS_LAST_PHONE_CHANGE',
'REGION_POPULATION_RELATIVE']
CATEGORICAL_FEATURES = ['EMERGENCYSTATE_MODE',
'NAME_CONTRACT_TYPE',
'NAME_INCOME_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_FAMILY_STATUS',
'NAME_HOUSING_TYPE',
'FLAG_OWN_CAR',
'FLAG_OWN_REALTY',
'FLAG_EMP_PHONE',
'FLAG_WORK_PHONE',
'FLAG_PHONE',
'FLAG_EMAIL',
'CODE_GENDER',
'ORGANIZATION_TYPE',
'LIVE_REGION_NOT_WORK_REGION',
'LIVE_CITY_NOT_WORK_CITY',
'REG_REGION_NOT_LIVE_REGION',
'REG_REGION_NOT_WORK_REGION',
'REG_CITY_NOT_LIVE_CITY',
'REG_CITY_NOT_WORK_CITY']
data[CONTINUOUS_FEATURES + CATEGORICAL_FEATURES + ORDINAL_FEATURES + ['TARGET']].to_csv(
'/Users/bangxixiao/Desktop/python_projects/DATA1030_MIDTERM_PROJECT/data/data_unimputed_2.csv',
index=False
)
| 0.534855 | 0.817465 |
# Requirements
1) Install the libraries required to run the code.
2) The game can be run just by running the last cell, together with its saved trained .pkl model file.
```
!pip install gym
!apt-get update && apt-get install cmake libopenmpi-dev python3-dev zlib1g-dev
!pip install stable-baselines
!apt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb xorg-dev python-opengl libboost-all-dev libsdl2-dev swig
!pip install pyvirtualdisplay
!pip install gym
!pip install "gym[atari]"
!pip install piglet
import os
import gym
import numpy as np
import matplotlib.pyplot as plt
import time
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.deepq.policies import CnnPolicy
from stable_baselines.deepq.policies import LnCnnPolicy
from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines.deepq.policies import LnMlpPolicy
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines import DQN
```
# Define callback
Called after each step to check the mean reward and save the best model so far.
```
best_mean_reward, n_steps = -np.inf, 0
def callback(_locals, _globals):
"""
    Callback called at each step (for DQN and others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
global n_steps, best_mean_reward
# Print stats every 1000 calls
if (n_steps + 1) % 1000 == 0:
# Evaluate policy performance
x, y = ts2xy(load_results(log_dir), 'timesteps')
if len(x) > 0:
mean_reward = np.mean(y[-100:])
#print(x[-1], 'timesteps')
#print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > best_mean_reward:
best_mean_reward = mean_reward
# Example for saving best model
print("Saving new best model at %d timestamp"%x[-1])
_locals['self'].save('deepq_IceHockey')
n_steps += 1
return True
```
# Initialise Environment
```
# Create log dir
log_dir = "/tmp/gym/"
os.makedirs(log_dir, exist_ok=True)
# Environment that gives image as input
env = gym.make('IceHockey-v0') # Use Ice Hockey environment that uses images
# Monitor training
env = Monitor(env, log_dir, allow_early_resets=True)
env = DummyVecEnv([lambda: env])
# Environment that gives RAM as input
env_ram = gym.make('IceHockey-ram-v0') # Use Ice Hockey environment that uses RAM observations
# Monitor training
env_ram = Monitor(env_ram, log_dir, allow_early_resets=True)
env_ram = DummyVecEnv([lambda: env_ram])
```
# Training using DQN with CNN Policy (Image based)
```
model = DQN(CnnPolicy, env, verbose=1) # Load DQN for environment
start_time = time.time()
model.learn(total_timesteps=25000,callback=callback) # Run for 25000 steps on DQN
elapsed_time = time.time() - start_time
print("Time taken: %d s"%elapsed_time) # Display training time
model.save("deepq_IceHockey")
```
# Training using DQN with layer normalised MLP Policy (RAM based)
```
model = DQN(LnMlpPolicy, env_ram, verbose=1) # Load DQN for environment
start_time = time.time()
model.learn(total_timesteps=25000,callback=callback) # Run for 25000 steps on DQN
elapsed_time = time.time() - start_time
print("Time taken: %d s"%elapsed_time) # Display training time
model.save("deepq_IceHockey")
```
# Training Result
(RAM as input)
Reference:
https://github.com/hill-a/stable-baselines
https://colab.research.google.com/drive/1L_IMo6v0a0ALK8nefZm6PqPSy0vZIWBT
```
def plot_results(log_folder, title='Learning Curve'):
"""
plot the results
:param log_folder: (str) the save location of the results to plot
:param title: (str) the title of the task to plot
"""
x, y = ts2xy(load_results(log_folder), 'timesteps')
fig = plt.figure(title)
plt.plot(x, y)
plt.xlabel('Number of Timesteps')
plt.ylabel('Rewards')
plt.title(title )
plt.show()
plot_results(log_dir)
```
# Running trained model
To exit the game, restart the kernel.
Runs in a Linux environment with the packages installed.
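Note that the install cells pull in `pyvirtualdisplay` and `xvfb`, but no display is ever started; on a headless Linux box, `render()` needs one. A sketch of that setup (an assumption, not part of the original notebook):
```
# Start a virtual display so env_ram.render() can work on a headless machine
from pyvirtualdisplay import Display
virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()
```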
```
model = DQN.load("deepq_IceHockey.pkl") # Load saved model from file
obs = env_ram.reset() # Load trained model
while True: # Always play the game
action, _states = model.predict(obs)
obs, rewards, dones, info = env_ram.step(action)
env_ram.render()
```
|
github_jupyter
|
!pip install gym
!apt-get update && apt-get install cmake libopenmpi-dev python3-dev zlib1g-dev
!pip install stable-baselines
!apt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb xorg-dev python-opengl libboost-all-dev libsdl2-dev swig
!pip install pyvirtualdisplay
!pip install gym
!pip install "gym[atari]"
!pip install piglet
import os
import gym
import numpy as np
import matplotlib.pyplot as plt
import time
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.deepq.policies import CnnPolicy
from stable_baselines.deepq.policies import LnCnnPolicy
from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines.deepq.policies import LnMlpPolicy
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines import DQN
best_mean_reward, n_steps = -np.inf, 0
def callback(_locals, _globals):
"""
Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
global n_steps, best_mean_reward
# Print stats every 1000 calls
if (n_steps + 1) % 1000 == 0:
# Evaluate policy performance
x, y = ts2xy(load_results(log_dir), 'timesteps')
if len(x) > 0:
mean_reward = np.mean(y[-100:])
#print(x[-1], 'timesteps')
#print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > best_mean_reward:
best_mean_reward = mean_reward
# Example for saving best model
print("Saving new best model at %d timestamp"%x[-1])
_locals['self'].save('deepq_IceHockey')
n_steps += 1
return True
# Create log dir
log_dir = "/tmp/gym/"
os.makedirs(log_dir, exist_ok=True)
# Environment that gives image as input
env = gym.make('IceHockey-v0') # Use Ice Hockey environment that uses images
# Monitor training
env = Monitor(env, log_dir, allow_early_resets=True)
env = DummyVecEnv([lambda: env])
# Environment that gives RAM as input
env_ram = gym.make('IceHockey-ram-v0') # Use Ice Hockey environment that uses images
# Monitor training
env_ram = Monitor(env_ram, log_dir, allow_early_resets=True)
env_ram = DummyVecEnv([lambda: env_ram])
model = DQN(CnnPolicy, env, verbose=1) # Load DQN for environment
start_time = time.time()
model.learn(total_timesteps=25000,callback=callback) # Run for 25000 steps on DQN
elapsed_time = time.time() - start_time
print("Time taken: %d s"%elapsed_time) # Display training time
model.save("deepq_IceHockey")
model = DQN(LnMlpPolicy, env_ram, verbose=1) # Load DQN for environment
start_time = time.time()
model.learn(total_timesteps=25000,callback=callback) # Run for 25000 steps on DQN
elapsed_time = time.time() - start_time
print("Time taken: %d s"%elapsed_time) # Display training time
model.save("deepq_IceHockey")
def plot_results(log_folder, title='Learning Curve'):
"""
plot the results
:param log_folder: (str) the save location of the results to plot
:param title: (str) the title of the task to plot
"""
x, y = ts2xy(load_results(log_folder), 'timesteps')
fig = plt.figure(title)
plt.plot(x, y)
plt.xlabel('Number of Timesteps')
plt.ylabel('Rewards')
plt.title(title )
plt.show()
plot_results(log_dir)
model = DQN.load("deepq_IceHockey.pkl") # Load saved model from file
obs = env_ram.reset() # Load trained model
while True: # Always play the game
action, _states = model.predict(obs)
obs, rewards, dones, info = env_ram.step(action)
env_ram.render()
| 0.717408 | 0.686685 |
## Connecting to a Database
### Getting and Saving Data
*Curtis Miller*
Here I connect to a MySQL database called `PopPyramids` for managing the population pyramid data. I use the pymysql package to manage the connection.
```
import pymysql as sql
```
First we establish a connection with MySQL.
```
# Creates a connection
conn = sql.connect(host='localhost', # The host where the database is located
user='root', # The user connecting to the database
passwd=pswd, # The user's password (get your own or this may not work)
db='mysql')
cur = conn.cursor() # We send commands via the cursor
```
After establishing a connection, issue a command. Here, we send SQL to use a database.
```
_ = cur.execute("USE poppyramids;") # Returns the number of rows affected using a cursor command
print(_)
```
Now we close the connection. **Always close the connection!** (Bad things happen if you don't.)
```
cur.close() # Close the cursor
conn.close() # Close the connection
```
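One pattern that guarantees cleanup even if a statement raises is `contextlib.closing`; a sketch (not used in the original, and it assumes `pswd` holds your password as in the cells above):
```
from contextlib import closing

# Both the connection and the cursor are closed automatically, even on errors
with closing(sql.connect(host='localhost', user='root', passwd=pswd, db='mysql')) as conn:
    with closing(conn.cursor()) as cur:
        cur.execute("USE poppyramids;")
```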
We can also connect using the package SQLAlchemy, which is intended to be a high-level interface to SQL databases, abstracting away SQL commands (here is a good tutorial for getting started: [A step-by-step SQLAlchemy tutorial](http://www.rmunn.com/sqlalchemy-tutorial/tutorial.html)). pandas depends on SQLAlchemy objects for its SQL interfaces.
I want to rely on pure SQL for now, so I'm only interested in creating a connection via an `Engine` object.
```
from sqlalchemy import create_engine
```
`create_engine()` creates an engine for connecting to a database from a connection string. Here is a function for generating such a string for connecting to MySQL via pymysql.
```
def pymysql_sqlalchemy_stringgen(user, passwd, host, dbname):
"""Generate a connection string for use with SQLAlchemy for MySQL and PyMySQL connections
Args:
user (str): The username of the connecting user
passwd (str): The user's password
host (str): The host for where the database is located
dbname (str): The name of the database to connect with
Returns:
(str) A SQLAlchemy connection string suitable for use with create_engine()
Additional options for the connection are not supported with this function.
"""
return "mysql+pymysql://" + user + ":" + passwd + "@" + host + "/" + dbname
pymysql_sqlalchemy_stringgen("<username>", "<password>", "<host>", "<dbname>") # Overall format (not including options)
pymysql_sqlalchemy_stringgen("curtis", "guest", "localhost", "mydb") # Demonstration
```
Now create a connection via `create_engine()`.
```
conn_string = pymysql_sqlalchemy_stringgen("root", pswd, "localhost", "poppyramids")
conn = create_engine(conn_string).connect() # This does not work exactly like the connection we created above
# (for example; we don't create cursors)
```
We'll use `conn` later; for now, let's close the connection.
```
conn.close()
```
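For completeness, a sketch of the pandas interface mentioned above, reusing `pswd` and the helper defined earlier. The table name `population` is purely hypothetical — the notebook never shows the schema:
```
import pandas as pd

# Hypothetical example: read rows through a SQLAlchemy engine with pandas
engine = create_engine(pymysql_sqlalchemy_stringgen("root", pswd, "localhost", "poppyramids"))
df = pd.read_sql("SELECT * FROM population LIMIT 5;", engine)
engine.dispose()
```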
|
github_jupyter
|
import pymysql as sql
# Creates a connection
conn = sql.connect(host='localhost', # The host where the database is located
user='root', # The user connecting to the database
passwd=pswd, # The user's password (get your own or this may not work)
db='mysql')
cur = conn.cursor() # We send commands via the cursor
_ = cur.execute("USE poppyramids;") # Returns the number of rows affected using a cursor command
print(_)
cur.close() # Close the cursor
conn.close() # Close the connection
from sqlalchemy import create_engine
def pymysql_sqlalchemy_stringgen(user, passwd, host, dbname):
"""Generate a connection string for use with SQLAlchemy for MySQL and PyMySQL connections
Args:
user (str): The username of the connecting user
passwd (str): The user's password
host (str): The host for where the database is located
dbname (str): The name of the database to connect with
Returns:
(str) A SQLAlchemy connection string suitable for use with create_engine()
Additional options for the connection are not supported with this function.
"""
return "mysql+pymysql://" + user + ":" + passwd + "@" + host + "/" + dbname
pymysql_sqlalchemy_stringgen("<username>", "<password>", "<host>", "<dbname>") # Overall format (not including options)
pymysql_sqlalchemy_stringgen("curtis", "guest", "localhost", "mydb") # Demonstration
conn_string = pymysql_sqlalchemy_stringgen("root", pswd, "localhost", "poppyramids")
conn = create_engine(conn_string).connect() # This does not work exactly like the connection we created above
# (for example; we don't create cursors)
conn.close()
| 0.453504 | 0.870872 |
# Inheritance I
- [Download the lecture notes](https://philchodrow.github.io/PIC16A/content/object_oriented_programming/inheritance_I.ipynb).
Often, we face a problem that is *almost* solved by an existing class. For example, suppose I want to use Python to keep track of my grocery shopping. I can use a `dict` to log the items in my pantry:
```
pantry = {
"rice (lbs)" : 2,
"harissa (jars)" : 1,
"onions" : 5,
"lemons" : 3
}
```
Now suppose I go shopping, and I come back with:
```
shopping_trip = {
"rice (lbs)" : 1,
"onions" : 2,
"spinach (lbs)" : 1
}
```
What I'd like to do is add these `dict`s together in the obvious way, obtaining the `dict`
```
{
"rice (lbs)" : 3,
"harissa (jars)" : 1,
"onions" : 7,
"lemons" : 3,
"spinach (lbs)" : 1
}
```
Unfortunately, the native implementation of `dict`s doesn't support this kind of operation. For our first example, we will implement a new class that **inherits** from `dict`, and which supports basic arithmetic. In particular, once we're done, the following will achieve the expected result:
```
pantry += shopping_trip
```
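For reference, plain dicts reject this operation outright — a quick check that is not part of the original notes:
```
# dict defines neither __add__ nor __iadd__, so this raises a TypeError
try:
    {"rice (lbs)": 2} + {"rice (lbs)": 1}
except TypeError as e:
    print(e)
```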
To write a class `classA` that inherits from `classB`, just declare `class classA(classB)`. For example:
```
class ArithmeticDict(dict):
pass
```
Just by including the inheritance, this very boring class already does everything that a `dict` can do. In fact, it IS a `dict` -- that is, it is an *instance* of the `dict` class.
```
x = ArithmeticDict({'a' : 1, 'b' : 2})
x, type(x), isinstance(x, dict)
```
We can do normal `dict` methods:
```
x.update({'c' : 3})
x, x['a']
```
**Pause for a moment:** why were we able to do:
```a = ArithmeticDict({'a' : 1, 'b' : 2})```
and get the expected result?
```
# behind the scenes
# which __init__() method is this?
# ArithmeticDict defines no __init__ of its own, so attribute lookup falls
# through to dict.__init__ via inheritance -- both calls below hit the same method
a = ArithmeticDict.__init__({'a' : 1, 'b' : 2})
b = dict.__init__({'a' : 1, 'b' : 2})
Of course, this doesn't give us anything new yet. The important part is that we are now able to define new methods that will be available only for the `ArithmeticDict` class.
```
class ArithmeticDict(dict):
"""
A dictionary class that supports entrywise addition
"""
def __add__(self, to_add):
"""
Add two ArithmeticDicts entrywise.
"""
new = {}
keys1 = set(self.keys())
keys2 = set(to_add.keys())
all_keys = keys1.union(keys2)
for key in all_keys:
new.update({key : self.get(key,0) + to_add.get(key,0)})
return ArithmeticDict(new)
x = ArithmeticDict({'a' : 1, 'b' : 2})
y = ArithmeticDict({'a' : 1, 'b' : 3, 'c' : 7})
x+y
```
I'm now able to update my pantry:
```
pantry = {
"rice (lbs)" : 2,
"harissa (jars)" : 1,
"onions" : 5,
"lemons" : 3
}
shopping_trip = {
"rice (lbs)" : 1,
"onions" : 2,
"spinach (lbs)" : 1
}
pantry = ArithmeticDict(pantry)
pantry
shopping_trip = ArithmeticDict(shopping_trip)
shopping_trip
pantry += shopping_trip
# OR pantry = pantry + shopping_trip
pantry
```
|
github_jupyter
|
pantry = {
"rice (lbs)" : 2,
"harissa (jars)" : 1,
"onions" : 5,
"lemons" : 3
}
shopping_trip = {
"rice (lbs)" : 1,
"onions" : 2,
"spinach (lbs)" : 1
}
{
"rice (lbs)" : 3,
"harissa (jars)" : 1,
"onions" : 7,
"lemons" : 3,
"spinach (lbs)" : 1
}
pantry += shopping_trip
class ArithmeticDict(dict):
pass
x = ArithmeticDict({'a' : 1, 'b' : 2})
x, type(x), isinstance(x, dict)
x.update({'c' : 3})
x, x['a']
and get the expected result?
Of course, this doesn't give us anything new yet. The important part is that we are now able to define new methods that will be available only for the `ArithmeticDict` class.
I'm now able to update my pantry:
| 0.308919 | 0.969237 |
```
import json
import numpy as np
import pandas as pd
from utils import preprocess
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
train, test = preprocess.get_data()
xTrain = train.drop('Survived', axis=1)
yTrain = train.Survived
```
## Creating and fixing the folds for all the classifiers
```
train = train.reset_index(drop=True)
train['fold'] = None
from sklearn.model_selection import StratifiedKFold
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=21)
for i, (train_idx, valid_dx) in enumerate(kfold.split(xTrain, yTrain)):
train.loc[valid_dx,'fold'] = i
```
## Loading pre-tuned classifiers
```
with open('./results/05_.json') as file:
top_clf = json.load(file)
print('Different Classifiers with tuned Parameters\n')
for i, (key, val) in enumerate(top_clf.items()):
print(i+1,' ', key, '\n ', val)
print('-'*100, '\n')
def get_preds(train, estimator, params, prob=True):
"""
    Returns predictions or positive-class probabilities by training on n-1 folds
    and predicting for a single fold at a time, based on the predefined folds.
    Input:
    train - dataframe with 'Survived' as the target column and 'fold' holding validation fold numbers.
    estimator - classifier class
    params - parameters for the classifier
    prob - boolean input specifying whether to return probabilities (True) or class predictions (False)
    returns:
    pandas series with predictions or positive-class probabilities, based on the input 'prob'
"""
Preds = pd.Series(index=train.index)
for fold in train.fold.unique():
xData = train.drop(['Survived', 'fold'], axis=1)
yData = train.Survived
X_train = xData[train['fold'] != fold]
y_train = yData[train['fold'] != fold]
X_valid = xData[train['fold'] == fold]
y_valid = yData[train['fold'] == fold]
clf = estimator(**params)
clf.fit(X_train, y_train)
if prob:
# only taking probability for class 1
Preds[train['fold'] == fold] = clf.predict_proba(X_valid)[:,1]
else:
Preds[train['fold'] == fold] = clf.predict(X_valid)
return Preds
prob_df = pd.DataFrame({
'nbc':get_preds(train, GaussianNB, top_clf['GaussianNB']),
'lrc':get_preds(train, LogisticRegression, top_clf['LogisticRegression']),
'nnc':get_preds(train, KNeighborsClassifier, top_clf['KNeighborsClassifier']),
'svc':get_preds(train, SVC, {**top_clf['SVC'], **{'probability':True}} ),
'rfc':get_preds(train, RandomForestClassifier, top_clf['RandomForestClassifier']),
'abc':get_preds(train, AdaBoostClassifier, top_clf['AdaBoostClassifier']),
'gbc':get_preds(train, GradientBoostingClassifier, top_clf['GradientBoostingClassifier'])
})
pred_df = prob_df.round()
print('Individual performance of the classifiers\n')
scores = {}
for each in pred_df:
scores[each] = np.mean(pred_df[each] == train.Survived)
print(pd.DataFrame(scores, index=[0]))
```
## Hard Voting
```
pred_df.head()
mode_pred = pred_df.mode(axis=1).values.flatten()
print("Accuracy for simple voting classifier", np.mean(mode_pred == train.Survived))
```
## Soft Voting
```
prob_df.head()
mean_prob = prob_df.mean(axis=1).values.flatten()
print("Accuracy for soft voting classifier", np.mean(mean_prob.round() == train.Survived))
```
## Test
```
train.head()
test.head()
def predict(train, test, estimator, params):
"""
Train given estimator on train and return prediction for test.
"""
X_train = train.drop(['Survived', 'fold'], axis=1)
y_train = train.Survived
clf = estimator(**params)
clf.fit(X_train, y_train)
pred = clf.predict_proba(test)[:,1]
return pred
test_prob_df = pd.DataFrame({
'nbc':predict(train, test, GaussianNB, top_clf['GaussianNB']),
'lrc':predict(train, test, LogisticRegression, top_clf['LogisticRegression']),
'nnc':predict(train, test, KNeighborsClassifier, top_clf['KNeighborsClassifier']),
'svc':predict(train, test, SVC, {**top_clf['SVC'], **{'probability':True}} ),
'rfc':predict(train, test, RandomForestClassifier, top_clf['RandomForestClassifier']),
'abc':predict(train, test, AdaBoostClassifier, top_clf['AdaBoostClassifier']),
'gbc':predict(train, test, GradientBoostingClassifier, top_clf['GradientBoostingClassifier'])
})
test_pred_df = test_prob_df.round()
hard_vote = test_pred_df.mode(axis=1).values.flatten()
soft_vote = test_prob_df.mean(axis=1).values.flatten().round()
```
## Stacking
### Training a meta-model on the first-level classifiers' predictions
```
def parameterTune(estimator, param_grid, X, y):
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(
estimator = estimator,
param_grid = param_grid,
n_jobs = 11,
cv = 5,
)
grid.fit(X, y)
return grid.best_score_, grid.best_params_
# instantiating Support Vector Classifier
from sklearn.svm import SVC
estimator = SVC()
param_grid = [
{ 'kernel' : ['linear'],
'C' : [0.1, 1, 10, 100]},
{ 'kernel' : ['rbf'],
'C' : [0.1, 1, 10, 100],
'gamma' : ['scale', 'auto'],},
]
svc_best_score_, svc_best_params_ = parameterTune(estimator, param_grid, prob_df, yTrain)
print('best_score_:',svc_best_score_,'\nbest_params_:',svc_best_params_)
from sklearn.linear_model import LogisticRegression
estimator = LogisticRegression(tol=1e-4, solver='liblinear', random_state=1)
param_grid = {
'max_iter' : [1000, 2000, 3000],
'penalty' : ['l1', 'l2'],
'solver' : ['liblinear']
}
lrc_best_score_, lrc_best_params_ = parameterTune(estimator, param_grid, prob_df, yTrain)
print('best_score_:',lrc_best_score_,'\nbest_params_:',lrc_best_params_)
```
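The grid searches above only tune the meta-learners on the out-of-fold probabilities; a sketch (not in the original) of how a stacked test prediction could then be produced, using the tuned logistic regression as the final estimator — that choice is an assumption:
```
# Fit the tuned logistic-regression meta-learner on the first-level
# out-of-fold probabilities, then predict on the test-set probabilities
meta_clf = LogisticRegression(tol=1e-4, random_state=1, **lrc_best_params_)
meta_clf.fit(prob_df, yTrain)
stacked_pred = meta_clf.predict(test_prob_df)
```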
|
github_jupyter
|
import json
import numpy as np
import pandas as pd
from utils import preprocess
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
train, test = preprocess.get_data()
xTrain = train.drop('Survived', axis=1)
yTrain = train.Survived
train = train.reset_index(drop=True)
train['fold'] = None
from sklearn.model_selection import StratifiedKFold
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=21)
for i, (train_idx, valid_dx) in enumerate(kfold.split(xTrain, yTrain)):
train.loc[valid_dx,'fold'] = i
with open('./results/05_.json') as file:
top_clf = json.load(file)
print('Differet Classifiers with tuned Parameters\n')
for i, (key, val) in enumerate(top_clf.items()):
print(i+1,' ', key, '\n ', val)
print('-'*100, '\n')
def get_preds(train, estimator, params, prob=True):
"""
Function returns prediction or positive class probability by training on n-1 folds
and predicting for single fold at a time, based on predefind folds
Input:
train - dataframe with 'Survived' as target column and 'fold' with validation fold numbers.
estimator - classfier object
params - parameters for the classifier
prob - binary imput to spesify wether to return predicion for proabability
returns:
pandas series with predictions or probability for positive class based on input 'prob'
"""
Preds = pd.Series(index=train.index)
for fold in train.fold.unique():
xData = train.drop(['Survived', 'fold'], axis=1)
yData = train.Survived
X_train = xData[train['fold'] != fold]
y_train = yData[train['fold'] != fold]
X_valid = xData[train['fold'] == fold]
y_valid = yData[train['fold'] == fold]
clf = estimator(**params)
clf.fit(X_train, y_train)
if prob:
# only taking probability for class 1
Preds[train['fold'] == fold] = clf.predict_proba(X_valid)[:,1]
else:
Preds[train['fold'] == fold] = clf.predict(X_valid)
return Preds
prob_df = pd.DataFrame({
'nbc':get_preds(train, GaussianNB, top_clf['GaussianNB']),
'lrc':get_preds(train, LogisticRegression, top_clf['LogisticRegression']),
'nnc':get_preds(train, KNeighborsClassifier, top_clf['KNeighborsClassifier']),
'svc':get_preds(train, SVC, {**top_clf['SVC'], **{'probability':True}} ),
'rfc':get_preds(train, RandomForestClassifier, top_clf['RandomForestClassifier']),
'abc':get_preds(train, AdaBoostClassifier, top_clf['AdaBoostClassifier']),
'gbc':get_preds(train, GradientBoostingClassifier, top_clf['GradientBoostingClassifier'])
})
pred_df = prob_df.round()
print('Individual performance of the classifiers\n')
scores = {}
for each in pred_df:
scores[each] = np.mean(pred_df[each] == train.Survived)
print(pd.DataFrame(scores, index=[0]))
pred_df.head()
mode_pred = pred_df.mode(axis=1).values.flatten()
print("Accuracy for simple voting classifier", np.mean(mode_pred == train.Survived))
prob_df.head()
mean_prob = prob_df.mean(axis=1).values.flatten()
print("Accuracy for soft voting classifier", np.mean(mean_prob.round() == train.Survived))
train.head()
test.head()
def predict(train, test, estimator, params):
"""
Train given estimator on train and return prediction for test.
"""
X_train = train.drop(['Survived', 'fold'], axis=1)
y_train = train.Survived
clf = estimator(**params)
clf.fit(X_train, y_train)
pred = clf.predict_proba(test)[:,1]
return pred
test_prob_df = pd.DataFrame({
'nbc':predict(train, test, GaussianNB, top_clf['GaussianNB']),
'lrc':predict(train, test, LogisticRegression, top_clf['LogisticRegression']),
'nnc':predict(train, test, KNeighborsClassifier, top_clf['KNeighborsClassifier']),
'svc':predict(train, test, SVC, {**top_clf['SVC'], **{'probability':True}} ),
'rfc':predict(train, test, RandomForestClassifier, top_clf['RandomForestClassifier']),
'abc':predict(train, test, AdaBoostClassifier, top_clf['AdaBoostClassifier']),
'gbc':predict(train, test, GradientBoostingClassifier, top_clf['GradientBoostingClassifier'])
})
test_pred_df = test_prob_df.round()
hard_vote = test_pred_df.mode(axis=1).values.flatten()
soft_vote = test_prob_df.mean(axis=1).values.flatten().round()
def parameterTune(estimator, param_grid, X, y):
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(
estimator = estimator,
param_grid = param_grid,
n_jobs = 11,
cv = 5,
)
grid.fit(X, y)
return grid.best_score_, grid.best_params_
# instantiating Support Vector Classifier
from sklearn.svm import SVC
estimator = SVC()
param_grid = [
{ 'kernel' : ['linear'],
'C' : [0.1, 1, 10, 100]},
{ 'kernel' : ['rbf'],
'C' : [0.1, 1, 10, 100],
'gamma' : ['scale', 'auto'],},
]
svc_best_score_, svc_best_params_ = parameterTune(estimator, param_grid, prob_df, yTrain)
print('best_score_:',svc_best_score_,'\nbest_params_:',svc_best_params_)
from sklearn.linear_model import LogisticRegression
estimator = LogisticRegression(tol=1e-4, solver='liblinear', random_state=1)
param_grid = {
'max_iter' : [1000, 2000, 3000],
'penalty' : ['l1', 'l2'],
'solver' : ['liblinear']
}
lrc_best_score_, lrc_best_params_ = parameterTune(estimator, param_grid, prob_df, yTrain)
print('best_score_:',lrc_best_score_,'\nbest_params_:',lrc_best_params_)
| 0.628293 | 0.695193 |
# Initial commands
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
import pavelstatsutils
%matplotlib inline
patients = pd.read_csv("patients.csv")
controls = pd.read_csv("controls.csv")
df = pd.concat([patients, controls])
df.head()
```
# Descriptional statistics
## Whole sample
```
df.describe()
```
## Patients
```
df.loc[df['PATIENT'] == 1].describe()
```
## Controls
```
df.loc[df['PATIENT'] == 0].describe()
```
# Plotting data
```
sns.pairplot(df.dropna(how='any'), vars=[x for x in df.columns if x!='PATIENT'], hue="PATIENT", size=2)
```
# Correlations
## Whole sample correlation
```
corr_r, corr_p, corr_b = pavelstatsutils.corr(df)
corr_r
corr_p
corr_b
sns.heatmap(corr_r)
sns.heatmap(corr_b)
```
## Patients correlations
```
pat_corr_r, pat_corr_p, pat_corr_b = pavelstatsutils.corr(df.loc[df['PATIENT'] == 1].drop(['PATIENT'], axis=1))
pat_corr_r
pat_corr_p
pat_corr_b
sns.heatmap(pat_corr_r)
sns.heatmap(pat_corr_b)
```
## Controls correlations
```
con_corr_r, con_corr_p, con_corr_b = pavelstatsutils.corr(df.loc[df['PATIENT'] == 0].drop(['PATIENT'], axis=1))
con_corr_r
con_corr_p
con_corr_b
sns.heatmap(con_corr_r)
sns.heatmap(con_corr_b)
```
## Comparison of patients and groups in significant correlations
```
sns.heatmap( pat_corr_b ^ con_corr_b )
```
# Group comparisons
## Plots
```
f, axes = plt.subplots(1, len(df.columns)-2, figsize=(10, 10), sharex=True)
i = 0
for column in df.columns:
if column != "PATIENT" and column != "gender":
sns.boxplot(x="PATIENT", y=column, data=df, ax=axes[i])
i = i + 1
#plt.setp(axes, yticks=[])
plt.tight_layout()
```
## Parametric testing prerequisites
Shapiro: Shapiro-Wilk's test for normality.
Levene: Levene's test for homoscedasticity.
```
prereq = {}
prereq_p = {}
for column in df.columns:
if column != 'PATIENT':
prereq_p[column] = []
prereq[column] = []
#All Normality Shapiro-Wilk test
W, p = scipy.stats.shapiro(df[column].dropna())
normality = p > 0.05
prereq_p[column].append(p)
prereq[column].append(normality)
#Patients Normality Shapiro-Wilk test
W, p = scipy.stats.shapiro(df.loc[df['PATIENT'] == 1, column].dropna())
normality = p > 0.05
prereq_p[column].append(p)
prereq[column].append(normality)
#Controls Normality Shapiro-Wilk test
W, p = scipy.stats.shapiro(df.loc[df['PATIENT'] == 0, column].dropna())
normality = p > 0.05
prereq_p[column].append(p)
prereq[column].append(normality)
#Patients & Controls Homoscedasticity
W, p = scipy.stats.levene(df.loc[df['PATIENT'] == 1, column].dropna(), df.loc[df['PATIENT'] == 0, column].dropna())
homoscedasticity = p > 0.05
prereq_p[column].append(p)
prereq[column].append(homoscedasticity)
prerequisities = pd.DataFrame(prereq, index=['all_normality', 'patients_normality', 'controls_normality', 'homoscedasticity'])
prerequisities_p = pd.DataFrame(prereq_p, index=['all_Shapiro', 'patients_Shapiro', 'controls_Shapiro', 'Levene'])
prerequisities
prerequisities_p
```
## Statistical tests
```
test = {}
for column in df.columns:
if column != 'PATIENT':
test[column] = []
homoscedasticity = prerequisities.loc['homoscedasticity'][column]
#Student's T-test
if homoscedasticity:
t, p = scipy.stats.ttest_ind(
df.loc[df['PATIENT'] == 1, column].dropna(),
df.loc[df['PATIENT'] == 0, column].dropna(),
equal_var=homoscedasticity
)
test[column].append(p) #Student's T-test (prerequisities fullfilled)
test[column].append('') #Welsh T-test
#Welsh T-test
else:
t, p = scipy.stats.ttest_ind(
df.loc[df['PATIENT'] == 1, column].dropna(),
df.loc[df['PATIENT'] == 0, column].dropna(),
equal_var=homoscedasticity
)
test[column].append('') #Student's T-test (prerequisities not fullfilled)
test[column].append(p)
#Mann-Whitney U-test
u, p = scipy.stats.mannwhitneyu(
df.loc[df['PATIENT'] == 1, column].dropna(),
df.loc[df['PATIENT'] == 0, column].dropna()
)
test[column].append(p)
test = pd.DataFrame(test, index=['Student_T-test', 'Welsh_T-test', 'Mann-Whitney_U-test'])
test
```
## Bonferroni correction
The significance threshold is divided by the number of tests performed:
```
print("p = {}".format( 0.05/float(len(test.columns)) ))
```
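A small sketch (not in the original) of applying that corrected threshold to the p-value table; the empty strings for non-applicable tests are coerced to NaN first:
```
# Compare each p-value against the Bonferroni-corrected threshold
alpha_bonf = 0.05 / float(len(test.columns))
significant = test.apply(pd.to_numeric, errors='coerce') < alpha_bonf
significant
```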
# RBD split
```
rbd = df[df['PATIENT'] == 1]
rbd = rbd.drop(['PATIENT', 'SN_area', 'SN_index', '3rd_ventricle'], axis=1 )
rbd
sns.pairplot(rbd.dropna(how='any'), vars=rbd.columns, size=2)
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
import pavelstatsutils
%matplotlib inline
patients = pd.read_csv("patients.csv")
controls = pd.read_csv("controls.csv")
df = pd.concat([patients, controls])
df.head()
df.describe()
df.loc[df['PATIENT'] == 1].describe()
df.loc[df['PATIENT'] == 0].describe()
sns.pairplot(df.dropna(how='any'), vars=[x for x in df.columns if x!='PATIENT'], hue="PATIENT", size=2)
corr_r, corr_p, corr_b = pavelstatsutils.corr(df)
corr_r
corr_p
corr_b
sns.heatmap(corr_r)
sns.heatmap(corr_b)
pat_corr_r, pat_corr_p, pat_corr_b = pavelstatsutils.corr(df.loc[df['PATIENT'] == 1].drop(['PATIENT'], axis=1))
pat_corr_r
pat_corr_p
pat_corr_b
sns.heatmap(pat_corr_r)
sns.heatmap(pat_corr_b)
con_corr_r, con_corr_p, con_corr_b = pavelstatsutils.corr(df.loc[df['PATIENT'] == 0].drop(['PATIENT'], axis=1))
con_corr_r
con_corr_p
con_corr_b
sns.heatmap(con_corr_r)
sns.heatmap(con_corr_b)
sns.heatmap( pat_corr_b ^ con_corr_b )
f, axes = plt.subplots(1, len(df.columns)-2, figsize=(10, 10), sharex=True)
i = 0
for column in df.columns:
if column != "PATIENT" and column != "gender":
sns.boxplot(x="PATIENT", y=column, data=df, ax=axes[i])
i = i + 1
#plt.setp(axes, yticks=[])
plt.tight_layout()
prereq = {}
prereq_p = {}
for column in df.columns:
if column != 'PATIENT':
prereq_p[column] = []
prereq[column] = []
#All Normality Shapiro-Wilk test
W, p = scipy.stats.shapiro(df[column].dropna())
normality = p > 0.05
prereq_p[column].append(p)
prereq[column].append(normality)
#Patients Normality Shapiro-Wilk test
W, p = scipy.stats.shapiro(df.loc[df['PATIENT'] == 1, column].dropna())
normality = p > 0.05
prereq_p[column].append(p)
prereq[column].append(normality)
#Controls Normality Shapiro-Wilk test
W, p = scipy.stats.shapiro(df.loc[df['PATIENT'] == 0, column].dropna())
normality = p > 0.05
prereq_p[column].append(p)
prereq[column].append(normality)
#Patients & Controls Homoscedasticity
W, p = scipy.stats.levene(df.loc[df['PATIENT'] == 1, column].dropna(), df.loc[df['PATIENT'] == 0, column].dropna())
homoscedasticity = p > 0.05
prereq_p[column].append(p)
prereq[column].append(homoscedasticity)
prerequisities = pd.DataFrame(prereq, index=['all_normality', 'patients_normality', 'controls_normality', 'homoscedasticity'])
prerequisities_p = pd.DataFrame(prereq_p, index=['all_Shapiro', 'patients_Shapiro', 'controls_Shapiro', 'Levene'])
prerequisities
prerequisities_p
test = {}
for column in df.columns:
if column != 'PATIENT':
test[column] = []
homoscedasticity = prerequisities.loc['homoscedasticity'][column]
#Student's T-test
if homoscedasticity:
t, p = scipy.stats.ttest_ind(
df.loc[df['PATIENT'] == 1, column].dropna(),
df.loc[df['PATIENT'] == 0, column].dropna(),
equal_var=homoscedasticity
)
test[column].append(p) #Student's T-test (prerequisities fullfilled)
test[column].append('') #Welsh T-test
#Welsh T-test
else:
t, p = scipy.stats.ttest_ind(
df.loc[df['PATIENT'] == 1, column].dropna(),
df.loc[df['PATIENT'] == 0, column].dropna(),
equal_var=homoscedasticity
)
test[column].append('') #Student's T-test (prerequisities not fullfilled)
test[column].append(p)
#Mann-Whitney U-test
u, p = scipy.stats.mannwhitneyu(
df.loc[df['PATIENT'] == 1, column].dropna(),
df.loc[df['PATIENT'] == 0, column].dropna()
)
test[column].append(p)
test = pd.DataFrame(test, index=['Student_T-test', 'Welsh_T-test', 'Mann-Whitney_U-test'])
test
print "p = {}".format( 0.05/float(len(test.columns)) )
rbd = df[df['PATIENT'] == 1]
rbd = rbd.drop(['PATIENT', 'SN_area', 'SN_index', '3rd_ventricle'], axis=1 )
rbd
sns.pairplot(rbd.dropna(how='any'), vars=rbd.columns, size=2)
| 0.171651 | 0.823115 |
```
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
resultPath = "resultFiles/featureExtractionV2_by_Jun/"
intersected_genes = []
for x in ['CD4', 'CD8', 'CD14']:
df_healthy = pd.read_csv(resultPath+"HealthyPatients/"+x+".Ranksum.RFECV.gene.result", index_col=0)
df_long = pd.read_csv(resultPath+"LongDiseaseDuration/"+x+".Ranksum.RFECV.gene.result", index_col=0)
print(x)
print(len(df_healthy), len(df_long))
intersected_genes.append(list(set(df_healthy.index.tolist()).intersection(set(df_long.index.tolist()))))
flatten_genes = []
for x in intersected_genes:
print(len(x))
for y in x:
flatten_genes.append(y)
## Utils and Library for notebook
from notebook_utils.OpenKbcMSToolkit import ExtractionToolkit as exttoolkit
import itertools
def _LoadDiseaseDuration(df, meta_data, returntype='long'):
"""
df : Expression or activation score matrix
meta_data : meta data which contains duration and sample ID
output: long DD samples and short DD samples by list, or healthy samples and short DD samples by list
"""
# Sample by disease category
sample_list, sample_category = exttoolkit.get_sample_name_by_category(dataframe=meta_data, sampleColumn='HCVB_ID', dataColname='DiseaseCourse')
    # Sort by disease category and exclude unknown samples
patient_samples = [] # patient samples
healthy_samples = [] # healthy samples
for samples, category in zip(sample_list, sample_category):
if category=='Healthy':
healthy_samples = samples
else:
if category!='Unknown':# Excluding unknown samples
patient_samples.append(samples)
patient_samples = list(itertools.chain(*patient_samples)) # flatten
patient_samples = list(set(patient_samples).intersection(df.columns.tolist())) # intersected with act score matrix
healthy_samples = list(set(healthy_samples).intersection(df.columns.tolist())) # intersected with act score matrix
patient_meta = meta_data.loc[meta_data['HCVB_ID'].isin(patient_samples)] # Make patient metadata
longDD_samples, shortDD_samples = exttoolkit.get_sample_name_by_contValues(patient_meta, 'HCVB_ID', 'DiseaseDuration', 50)
longDD_samples = list(set(longDD_samples.values.tolist()).intersection(df.columns.tolist())) # intersected with act score matrix
shortDD_samples = list(set(shortDD_samples.values.tolist()).intersection(df.columns.tolist())) # intersected with act score matrix
if returntype=='long':
return longDD_samples, shortDD_samples
elif returntype=='healthy':
return healthy_samples, shortDD_samples
## With healthy
CD8_healthy = pd.read_csv(resultPath+"HealthyPatients/CD4.Ranksum.RFECV.gene.result", index_col=0)
CD8_healthy_ext = CD8_healthy.loc[intersected_genes[0]]
CD8_healthy_ext = CD8_healthy_ext.applymap(lambda x: np.log2(x+1))
CD8_healthy_ext = CD8_healthy_ext.subtract(CD8_healthy_ext.median(axis=1), axis=0)
meta_data = pd.read_csv('../data/annotation_metadata/EPIC_HCvB_metadata_baseline_updated-share.csv')
healthy_samples, shortDD_samples = _LoadDiseaseDuration(CD8_healthy_ext, meta_data, 'healthy')
lut = dict(zip(['short', 'healthy'], "rb"))
cate_map = ['short']*len(shortDD_samples)+['healthy']*len(healthy_samples)
cate_map = [lut[x] for x in cate_map]
sns.clustermap(CD8_healthy_ext[shortDD_samples+healthy_samples], col_colors=cate_map, method='complete', metric='euclidean', cmap='bwr', vmin=-.8, vmax=.8)
## With long disease duration
CD8_healthy = pd.read_csv(resultPath+"LongDiseaseDuration/CD4.Ranksum.RFECV.gene.result", index_col=0)
CD8_healthy_ext = CD8_healthy.loc[intersected_genes[0]]
CD8_healthy_ext = CD8_healthy_ext.applymap(lambda x: np.log2(x+1))
CD8_healthy_ext = CD8_healthy_ext.subtract(CD8_healthy_ext.median(axis=1), axis=0)
meta_data = pd.read_csv('../data/annotation_metadata/EPIC_HCvB_metadata_baseline_updated-share.csv')
longDD_samples, shortDD_samples = _LoadDiseaseDuration(CD8_healthy_ext, meta_data, 'long')
lut = dict(zip(['short', 'long'], "rb"))
cate_map = ['short']*len(shortDD_samples)+['long']*len(longDD_samples)
cate_map = [lut[x] for x in cate_map]
sns.clustermap(CD8_healthy_ext[shortDD_samples+longDD_samples], col_colors=cate_map, method='complete', metric='cosine', cmap='bwr', vmin=-.8, vmax=.8)
flatten_genes
```
|
github_jupyter
|
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
resultPath = "resultFiles/featureExtractionV2_by_Jun/"
intersected_genes = []
for x in ['CD4', 'CD8', 'CD14']:
df_healthy = pd.read_csv(resultPath+"HealthyPatients/"+x+".Ranksum.RFECV.gene.result", index_col=0)
df_long = pd.read_csv(resultPath+"LongDiseaseDuration/"+x+".Ranksum.RFECV.gene.result", index_col=0)
print(x)
print(len(df_healthy), len(df_long))
intersected_genes.append(list(set(df_healthy.index.tolist()).intersection(set(df_long.index.tolist()))))
flatten_genes = []
for x in intersected_genes:
print(len(x))
for y in x:
flatten_genes.append(y)
## Utils and Library for notebook
from notebook_utils.OpenKbcMSToolkit import ExtractionToolkit as exttoolkit
import itertools
def _LoadDiseaseDuration(df, meta_data, returntype='long'):
"""
df : Expression or activation score matrix
meta_data : meta data which contains duration and sample ID
output: long DD samples and short DD samples by list, or healthy samples and short DD samples by list
"""
# Sample by disease category
sample_list, sample_category = exttoolkit.get_sample_name_by_category(dataframe=meta_data, sampleColumn='HCVB_ID', dataColname='DiseaseCourse')
# Sort by disease category and exclude uknown samples
patient_samples = [] # patient samples
healthy_samples = [] # healthy samples
for samples, category in zip(sample_list, sample_category):
if category=='Healthy':
healthy_samples = samples
else:
if category!='Unknown':# Excluding unknown samples
patient_samples.append(samples)
patient_samples = list(itertools.chain(*patient_samples)) # flatten
patient_samples = list(set(patient_samples).intersection(df.columns.tolist())) # intersected with act score matrix
healthy_samples = list(set(healthy_samples).intersection(df.columns.tolist())) # intersected with act score matrix
patient_meta = meta_data.loc[meta_data['HCVB_ID'].isin(patient_samples)] # Make patient metadata
longDD_samples, shortDD_samples = exttoolkit.get_sample_name_by_contValues(patient_meta, 'HCVB_ID', 'DiseaseDuration', 50)
longDD_samples = list(set(longDD_samples.values.tolist()).intersection(df.columns.tolist())) # intersected with act score matrix
shortDD_samples = list(set(shortDD_samples.values.tolist()).intersection(df.columns.tolist())) # intersected with act score matrix
if returntype=='long':
return longDD_samples, shortDD_samples
elif returntype=='healthy':
return healthy_samples, shortDD_samples
## With healthy
CD8_healthy = pd.read_csv(resultPath+"HealthyPatients/CD4.Ranksum.RFECV.gene.result", index_col=0)
CD8_healthy_ext = CD8_healthy.loc[intersected_genes[0]]
CD8_healthy_ext = CD8_healthy_ext.applymap(lambda x: np.log2(x+1))
CD8_healthy_ext = CD8_healthy_ext.subtract(CD8_healthy_ext.median(axis=1), axis=0)
meta_data = pd.read_csv('../data/annotation_metadata/EPIC_HCvB_metadata_baseline_updated-share.csv')
healthy_samples, shortDD_samples = _LoadDiseaseDuration(CD8_healthy_ext, meta_data, 'healthy')
lut = dict(zip(['short', 'healthy'], "rb"))
cate_map = ['short']*len(shortDD_samples)+['healthy']*len(healthy_samples)
cate_map = [lut[x] for x in cate_map]
sns.clustermap(CD8_healthy_ext[shortDD_samples+healthy_samples], col_colors=cate_map, method='complete', metric='euclidean', cmap='bwr', vmin=-.8, vmax=.8)
## With healthy
CD8_healthy = pd.read_csv(resultPath+"LongDiseaseDuration/CD4.Ranksum.RFECV.gene.result", index_col=0)
CD8_healthy_ext = CD8_healthy.loc[intersected_genes[0]]
CD8_healthy_ext = CD8_healthy_ext.applymap(lambda x: np.log2(x+1))
CD8_healthy_ext = CD8_healthy_ext.subtract(CD8_healthy_ext.median(axis=1), axis=0)
meta_data = pd.read_csv('../data/annotation_metadata/EPIC_HCvB_metadata_baseline_updated-share.csv')
longDD_samples, shortDD_samples = _LoadDiseaseDuration(CD8_healthy_ext, meta_data, 'long')
lut = dict(zip(['short', 'long'], "rb"))
cate_map = ['short']*len(shortDD_samples)+['long']*len(longDD_samples)
cate_map = [lut[x] for x in cate_map]
sns.clustermap(CD8_healthy_ext[shortDD_samples+longDD_samples], col_colors=cate_map, method='complete', metric='cosine', cmap='bwr', vmin=-.8, vmax=.8)
flatten_genes
| 0.210198 | 0.342489 |
```
import pandas_datareader as pdr
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
from IPython.display import display, HTML
from itertools import cycle, islice
class Port:
def __init__(self,csv_path='port.csv'):
self.df = pd.read_csv(csv_path,index_col='Symbol')
assert self.df['Total'].describe().dtype.name == 'float64' , "Value in Total column must be float"
assert self.df['Cost'].describe().dtype.name == 'float64' , "Value in Cost column must be number"
assert self.df['Volume'].describe().dtype.name == 'float64' , "Value in Volume column must be number"
assert self.df['Dividend'].describe().dtype.name == 'float64' , "Value in Dividend column must be number"
self.total_cost = sum(self.df['Total'])
self.updateLatestPrice()
def chart(self,y="Total",title="Asset Allocation"):
def func(pct, df):
allvals = df['Total'].tolist()
absolute = pct/100.*np.sum(allvals)
index = [i for i in range(len(df['Total'].tolist())) if df['Total'].tolist()[i] == float("{:.2f}".format(absolute))]
return "{:.1f}%\n{:.2f} ฿\n{:d} volume".format(pct, absolute,int(df.iloc[index[0]]['Volume']))
self.df.plot.pie(y=y,title=title , autopct=lambda pct: func(pct, self.df),figsize=(12,12))
return
@staticmethod
def getLatestPrice(symbol):
separate = Port.getDataFromHTML(url='https://marketdata.set.or.th/mkt/stockquotation.do?symbol='+symbol +'&ssoPageId=1&language=th&country=TH')
return float(separate.iloc[1][-1])
def updateLatestPrice(self):
for x in self.df.index:
self.df.loc[x,'Latest'] = Port.getLatestPrice(x)
self.df.loc[x,'Profit'] = float("{:.2f}".format((Port.getLatestPrice(x) - self.df.loc[x,'Cost'])*self.df.loc[x,'Volume']))
self.df.loc[x,'Profit%'] = float("{:.2f}".format(self.df.loc[x,'Profit']*self.df.loc[x,'Cost']/100))
def HPR(self):
for x in self.df.index:
self.df.loc[x,'HPR'] = (self.df.loc[x,'Dividend']+(self.df.loc[x,'Latest']-self.df.loc[x,'Cost']))/self.df.loc[x,'Cost']
def saveToCSV(self):
self.df.to_csv('port.csv')
return
def describe(self):
total_cost = float("{:.2f}".format(sum(self.df['Total'])))
total_profit = float("{:.2f}".format(sum(self.df['Profit'])))
values = self.df['Profit'].tolist()
clrs = ['red' if (x < 0) else 'green' for x in values ]
self.df.plot.bar(y="Profit",figsize=(12,5),title="Profit/Loss" ,color=clrs)
display(HTML(self.df.to_html()))
print("Cost : {} BATH".format(total_cost),end=" | ")
print("Profit : {} BATH".format(total_cost+total_profit),end=" | ")
print("Relize : {} BATH".format(total_profit))
self.chart(y="Total",title="asset allocation")
@staticmethod
def getDataFromHTML(url,index_col=''):
try:
if index_col != '':
return pd.read_html(url,index_col=0)[0]
else:
return pd.read_html(url)[0]
except:
assert False, "โปรดตรวจสอบ URL ของท่าน"
@staticmethod
def CGD(symbol,DIV,g=8):
try:
int(DIV)
int(g)
except:
assert False , "DIV and G must be number"
if g==DIV:
assert False , "DIV ต้องไม่เท่ากับ G"
print("Instrinsic Value ของหุ้น {}".format(symbol))
d = Port.getDataFromHTML(url='https://www.set.or.th/set/companyrights.do?symbol='+symbol+'&ssoPageId=7&language=th&country=TH',index_col=0)
dividend = float(d.loc['เงินปันผล(บาท/หุ้น)']['เครื่องหมาย'].tolist()[0])
DIV = DIV/100
g = g/100
V = (dividend*(1+g))/(DIV-g)
print("ปันผลล่าสุด {}\nคาดว่าอัตราเติบโตของเงินปันผลจ่ายเท่ากับ {}\nอัตราผลตอบแทนที่ต้องการ {}".format(dividend,g,DIV))
d = Port.getDataFromHTML(url='https://www.set.or.th/set/companyhighlight.do?symbol='+symbol+'&ssoPageId=5&language=th&country=TH',index_col=0)
print("ราคาหุ้นปัจจุบัน {:.2f}".format(float(d.loc['ราคาล่าสุด(บาท)'].tolist()[-1])))
print("Instrinsic Value (มูลค่าราคาหุ้น) : {:.2f}".format(V))
print("----------------------------------------------")
def getAdjClose(df):
data = {}
for x in df.index:
data[x] = pdr.get_data_yahoo(x+'.BK')['Adj Close']
data = pd.DataFrame(data)
return data
def PairTrade(df,symbol1,symbol2):
data['diff'] = data[symbol1] - data[symbol2]
d = data[[symbol1,symbol2,'diff']]
d.plot()
d.plot.hist(y='diff')
d['mean'] = d['diff'].mean()
d['-1SD'] = d['diff'].mean() - d['diff'].std()
d['+1SD'] = d['diff'].mean() + d['diff'].std()
d.plot(y=['diff','-1SD','mean','+1SD'])
print("Mean : {} , SD : {} ".format(d['diff'].mean(),d['diff'].std()))
def checkCGD_fromPort(self,div):
for x in self.df.index:
Port.CGD(x,div)
myPort = Port()
myPort.describe()
myPort.checkCGD_fromPort(div=10)
```
<font size="+5">#02 | Decision Tree. A Supervised Classification Model</font>
- Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)
- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄
# Discipline to Search Solutions in Google
> Apply the following steps when **looking for solutions in Google**:
>
> 1. **Necessity**: How to load an Excel in Python?
> 2. **Search in Google**: by keywords
> - `load excel python`
> - ~~how to load excel in python~~
> 3. **Solution**: What's the `function()` that loads an Excel in Python?
> - A function is to programming what the atom is to physics.
> - Every time you want to do something in programming
> - **You will need a `function()`** to make it
> - Therefore, you must **detect parentheses `()`**
> - Out of all the words that you see in a website
> - Because they indicate the presence of a `function()`.
# Load the Data
> Load the Titanic dataset with the below commands
> - This dataset contains the **people** (rows) aboard the Titanic
> - And their **sociological characteristics** (columns)
> - The aim of this dataset is to predict the probability to `survive`
> - Based on the social demographic characteristics.
```
import seaborn as sns
df = sns.load_dataset(name='titanic').iloc[:, :4]
df.head()
```
# `DecisionTreeClassifier()` Model in Python
## Build the Model
> 1. **Necessity**: Build the Model
> 2. **Google**: How do you search for the solution?
> 3. **Solution**: Find the `function()` that makes it happen
## Code Thinking
> Which function computes the Model?
> - `fit()`
>
> How can you **import the function in Python**?
```
fit()
algo.fit()
algo = DecisionTreeClassifier()
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit()
```
### Separate Variables for the Model
> Regarding their role:
> 1. **Target Variable `y`**
>
> - [ ] What would you like **to predict**?
>
> 2. **Explanatory Variable `X`**
>
> - [ ] Which variable will you use **to explain** the target?
```
X = df.drop(columns='survived')
y = df.survived
```
### Finally `fit()` the Model
```
model.fit(X, y)
model.fit(X, y)
X
import numpy as np
import pandas as pd
X = pd.get_dummies(X)
model.fit(X,y)
X
a = X.dropna
type(a)
a.dropna()
X = X.dropna()
X
df = pd.get_dummies(df, drop_first=True).dropna()
X = df.drop(columns='survived')
y = df.survived
X.head()
model = DecisionTreeClassifier(max_depth=4)
model.fit(X,y)
```
## Calculate a Prediction with the Model
> - `model.predict_proba()`
```
manolo = df[:1]
manolo
manolo_X = X[:1]
manolo_X
model.predict_proba(manolo_X)
model.predict(manolo_X)
```
## Model Visualization
> - `tree.plot_tree()`
```
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt
X.columns
manolo
plt.figure(figsize=(20,10))
plot_tree(decision_tree=model, feature_names=X.columns, filled=True);
39/330
```
## Model Interpretation
> Why is `sex` the most important column? What does it have to do with **EDA** (Exploratory Data Analysis)?
```
%%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/7VeUPuFGJHk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
```
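> One quick EDA check (a sketch, assuming the dummy-encoded `df` from above, where `sex` became `sex_male`): the survival rate differs sharply between the two groups, which is why the tree splits on it first.
```
# survival rate by sex (sex_male == 1 means male)
df.groupby('sex_male')['survived'].mean()
```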
# Prediction vs Reality
> How good is our model?
## Precision
> - `model.score()`
```
model.score(X,y)
dfsel = df[['survived']].copy()
dfsel['pred'] = model.predict(X)
(dfsel.survived - dfsel.pred).mean()
dfsel
df["dif"]= (dfsel.pred - dfsel.survived)
(df.dif**2).sum()
(df.dif**2).sum()/714
1 - (df.dif**2).sum()/714
model.score(X,y)
comp = dfsel.survived == dfsel.pred
comp
comp.sum()
comp.sum()/714
comp.mean()
model.score(X,y)
dfsel
```
## Confusion Matrix
> 1. **Sensitivity** (correct prediction on positive value, $y=1$)
> 2. **Specificity** (correct prediction on negative value $y=0$).
```
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
dfsel
mat = confusion_matrix(y_true = dfsel.survived, y_pred=dfsel.pred)
mat
a = ConfusionMatrixDisplay(mat)
a.plot()
166/(124+166)
416/(416 + 8)
from sklearn.metrics import classification_report
report = classification_report(y_true = dfsel.survived, y_pred=dfsel.pred)
report
print(report)
```
## ROC Curve
> A way to summarise all the metrics (score, sensitivity & specificity)
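> A minimal sketch, assuming the fitted `model`, `X` and `y` from above (normally you would plot this on held-out data):
```
from sklearn.metrics import roc_curve, roc_auc_score

probs = model.predict_proba(X)[:, 1]          # predicted probability of survival
fpr, tpr, thresholds = roc_curve(y_true=y, y_score=probs)

plt.plot(fpr, tpr, label=f"AUC = {roc_auc_score(y, probs):.2f}")
plt.plot([0, 1], [0, 1], linestyle="--")      # chance line
plt.xlabel("False Positive Rate (1 - specificity)")
plt.ylabel("True Positive Rate (sensitivity)")
plt.legend();
```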
# 1. Frame the Problem
1. Define the objective in business terms.
2. How will your solution be used?
3. What are the current solutions/workarounds (if any)?
4. How should you frame this problem (supervised/unsupervised, online/offline, etc.)
5. How should performance be measured?
6. Is the performance measure aligned with the business objective?
7. What would be the minimum performance needed to reach the business objective?
8. What are comparable problems? Can you reuse experience or tools?
9. Is human expertise available?
10. How would you solve the problem manually?
11. List the assumptions you or others have made so far.
12. Verify assumptions if possible.
# 2. Get the Data
Note: automate as much as possible so you can easily get fresh data.
1. List the data you need and how much you need.
2. Find and document where you can get that data.
3. Check how much space it will take.
4. Check legal obligations, and get the authorization if necessary.
5. Get access authorizations.
6. Create a workspace (with enough storage space).
7. Get the data.
8. Convert the data to a format you can easily manipulate (without changing the data itself).
9. Ensure sensitive information is deleted or protected (e.g., anonymized).
10. Check the size and type of data (time series, sample, geographical, etc.).
11. Sample a test set, put it aside, and never look at it (no data snooping!).
```
# Numpy is the math library behind everything
# https://numpy.org/
import numpy as np
# Pandas has the data frame class used in a lot of work
# https://pandas.pydata.org/
import pandas as pd
# Matplotlib is my preferred plotting library
# https://matplotlib.org/
import matplotlib.pyplot as plt
# You will need to split up your data
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
from sklearn.model_selection import train_test_split
# You will want to cross validate your testing data
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
from sklearn.model_selection import cross_val_score
# Tune your parameters and find the best one
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
from sklearn.model_selection import GridSearchCV
# Randomized search on hyper parameters.
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html
from sklearn.model_selection import RandomizedSearchCV
# You should use pipelines to automate the flow
# https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
from sklearn.pipeline import Pipeline
# Set the backend of matplotlib to work better with Jupyter
# https://stackoverflow.com/questions/43027980/purpose-of-matplotlib-inline
%matplotlib inline
```
Sci-Kit Tools for getting data:
1. https://scikit-learn.org/stable/modules/classes.html#module-sklearn.datasets
2. https://scikit-learn.org/stable/datasets/index.html#datasets
Sources of Data
1. https://www.openml.org/
2. https://archive.ics.uci.edu/ml/datasets.php
3. https://github.com/awesomedata/awesome-public-datasets
4. https://www.data.gov/
5. https://www.kaggle.com/datasets
6. https://www.google.com/publicdata/directory
7. https://registry.opendata.aws/
8. https://azure.microsoft.com/en-us/services/open-datasets/
9. https://www.reddit.com/r/datasets/
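For example, the scikit-learn loaders linked above can pull a dataset from OpenML in one call, and the held-out test set from step 11 can be split off immediately (a sketch; the dataset name is only an illustration):
```
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split

# dataset name is only an example
data = fetch_openml("titanic", version=1, as_frame=True)
X, y = data.data, data.target

# sample a test set, put it aside, and never look at it (no data snooping!)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```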
# 3. Explore the Data
Note: try to get insights from a field expert for these steps.
1. Create a copy of the data for exploration (sampling it down to a manageable size if necessary).
2. Create a Jupyter notebook to keep record of your data exploration.
3. Study each attribute and its characteristics:
- Name
- Type (categorical, int/float, bounded/unbounded, text, structured, etc.)
- % of missing values
- Noisiness and type of noise (stochastic, outliers, rounding errors, etc.)
- Possibly useful for the task?
- Type of distribution (Gaussian, uniform, logarithmic, etc.)
4. For supervised learning tasks, identify the target attribute(s).
5. Visualize the data.
6. Study the correlations between attributes.
7. Study how you would solve the problem manually.
8. Identify the promising transformations you may want to apply.
9. Identify extra data that would be useful (go back to "Get the Data" above).
10. Document what you have learned.
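A minimal pandas sketch of this kind of first pass, assuming the exploration copy is a DataFrame called `df`:
```
# assuming `df` is the exploration copy of the data
df.info()                         # attribute names, types, missing values
df.describe()                     # distributions of numeric attributes
df.isna().mean()                  # fraction of missing values per column
df.hist(figsize=(12, 8))          # quick look at the distributions
df.select_dtypes("number").corr() # correlations between numeric attributes
```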
# 4. Prepare the Data
Notes:
- Work on copies of the data (keep the original dataset intact).
- Write functions for all data transformations you apply, for five reasons:
- So you can easily prepare the data the next time you get a fresh dataset
- So you can apply these transformations in future projects
- To clean and prepare the test set
- To clean and prepare new data instances
- To make it easy to treat your preparation choices as hyperparameters
1. Data cleaning:
- Fix or remove outliers (optional).
- Fill in missing values (e.g., with zero, mean, median...) or drop their rows (or columns).
2. Feature selection (optional):
- Drop the attributes that provide no useful information for the task.
3. Feature engineering, where appropriate:
- Discretize continuous features.
- Decompose features (e.g., categorical, date/time, etc.).
- Add promising transformations of features (e.g., log(x), sqrt(x), x^2, etc.).
- Aggregate features into promising new features.
4. Feature scaling: standardize or normalize features.
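The notes above recommend writing these transformations as reusable functions; in scikit-learn that usually means a `Pipeline` with a `ColumnTransformer`, for example (a sketch with illustrative column names):
```
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder

numeric_features = ["age", "income"]   # illustrative names
categorical_features = ["city"]        # illustrative names

numeric_pipeline = Pipeline([
    ("impute", SimpleImputer(strategy="median")),  # fill in missing values
    ("scale", StandardScaler()),                   # feature scaling
])

preprocess = ColumnTransformer([
    ("num", numeric_pipeline, numeric_features),
    ("cat", OneHotEncoder(handle_unknown="ignore"), categorical_features),
])
```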
# 5. Explore Models
Notes:
- If the data is huge, you may want to sample smaller training sets so you can train many different models in a reasonable time (be aware that this penalizes complex models such as large neural nets or Random Forests).
- Once again, try to automate these steps as much as possible.
1. Train many quick-and-dirty models from different categories (e.g., linear, naive Bayes, SVM, Random Forest, neural net, etc.) using standard parameters.
2. Measure and compare their performance.
- For each model, use N-fold cross-validation and compute the mean and standard deviation of their performance.
3. Analyze the most significant variables for each algorithm.
4. Analyze the types of errors the models make.
- What data would a human have used to avoid these errors?
5. Have a quick round of feature selection and engineering.
6. Have one or two more quick iterations of the five previous steps.
7. Short-list the top three to five most promising models, preferring models that make different types of errors.
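Step 2 above, as a sketch: N-fold cross-validation for a couple of quick baselines, assuming a prepared `X_train` and `y_train`:
```
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier

for name, candidate in [("logreg", LogisticRegression(max_iter=1000)),
                        ("random forest", RandomForestClassifier())]:
    scores = cross_val_score(candidate, X_train, y_train, cv=5)
    print(f"{name}: mean={scores.mean():.3f} std={scores.std():.3f}")
```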
# 6. Fine Tune Models
Notes:
- You will want to use as much data as possible for this step, especially as you move toward the end of fine-tuning.
- As always automate what you can.
1. Fine-tune the hyperparameters using cross-validation.
- Treat your data transformation choices as hyperparameters, especially when you are not sure about them (e.g., should I replace missing values with zero or the median value? Or just drop the rows?).
    - Unless there are very few hyperparameter values to explore, prefer random search over grid search (see the sketch after this list). If training is very long, you may prefer a Bayesian optimization approach (e.g., using Gaussian process priors, as described by Jasper Snoek, Hugo Larochelle, and Ryan Adams ([https://goo.gl/PEFfGr](https://goo.gl/PEFfGr)))
2. Try ensemble methods. Combining your best models will often perform better than running them individually.
3. Once you are confident about your final model, measure its performance on the test set to estimate the generalization error.
> Don't tweak your model after measuring the generalization error: you would just start overfitting the test set.
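The random-search recommendation from step 1, sketched with `RandomizedSearchCV` (parameter ranges are only illustrative):
```
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier

param_distributions = {               # illustrative ranges
    "n_estimators": randint(50, 500),
    "max_depth": randint(3, 20),
}

search = RandomizedSearchCV(RandomForestClassifier(), param_distributions,
                            n_iter=20, cv=5, random_state=42)
search.fit(X_train, y_train)
print(search.best_params_)
```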
# 7. Present Solution
1. Document what you have done.
2. Create a nice presentation.
- Make sure you highlight the big picture first.
3. Explain why your solution achieves the business objective.
4. Don't forget to present interesting points you noticed along the way.
- Describe what worked and what did not.
- List your assumptions and your system's limitations.
5. Ensure your key findings are communicated through beautiful visualizations or easy-to-remember statements (e.g., "the median income is the number-one predictor of housing prices").
# 8. Launch and Monitor
1. Get your solution ready for production (plug into production data inputs, write unit tests, etc.).
2. Write monitoring code to check your system's live performance at regular intervals and trigger alerts when it drops.
- Beware of slow degradation too: models tend to "rot" as data evolves.
- Measuring performance may require a human pipeline (e.g., via a crowdsourcing service).
    - Also monitor your inputs' quality (e.g., a malfunctioning sensor sending random values, or another team's output becoming stale). This is particularly important for online learning systems.
3. Retrain your models on a regular basis on fresh data (automate as much as possible).
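A toy sketch of such a monitoring check, assuming logged `(y_true, y_pred)` pairs and a business-defined minimum score:
```
from sklearn.metrics import accuracy_score

ALERT_THRESHOLD = 0.90  # assumption: business-defined minimum acceptable accuracy

def check_live_performance(y_true, y_pred):
    """Compute the live score and raise an alert when it drops below the threshold."""
    score = accuracy_score(y_true, y_pred)
    if score < ALERT_THRESHOLD:
        print(f"ALERT: live accuracy dropped to {score:.3f}")
    return score
```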
# Netops
## Predictive Infrastructure Monitoring
**Overview**
The netops demo demonstrates predictive infrastructure monitoring: the application builds, trains, and deploys a machine-learning model for analyzing and predicting failure in network devices as part of a network operations (NetOps) flow.
This full system demo is used to showcase multiple ways to build machine learning pipelines:
- [**Jupyter notebook**](#jupyter-dask): Using jupyter as our IDE: generating the data, exploring, training a model, deploying a nuclio function for inference and a grafana dashboard to monitor. <br>
Note that for running the code at scale we are using Dask. Dask is an open source library for running distributed Python.
- [**Nuclio**](#nuclio): Leveraging Nuclio (a serverless function framework) for deploying the model from Jupyter as a Nuclio function that can run in a serving layer
Note that under the pipelines directory we've created the same demo where each step can be deployed and run as a Nuclio function <br>
By doing that users can build an automated pipeline and use those functions as components in the pipeline
### Demo structure
The demo consists of four main parts:
**Generator**:
\[[Jupyter-Dask](01-generator.ipynb) | [Nuclio](pipelines/nuclio-generator.ipynb)]
Using our open source [deployment generator](https://github.com/zilbermanor/deployment_generator/tree/master/v3io_generator) (Which you can pip install [here](https://test.pypi.org/manage/project/v3io-generator/releases/)) we create a network deployment (e.g. with default names for Company, Data center, Device).
We then add our metrics via [metrics configuration](configurations/metrics_configuration.yaml). (Defaults to CPU Utilization, Latency, Throughput, Packet loss).
The generator can create both normal device metrics as defined by the Yaml, and error scenarios that cascade through the metrics until the device reaches a critical failure.
To see the devices' behaviour you can look at the **[Exploration](02-explore.ipynb)** notebook.
**Data Preprocessing**:
\[[Jupyter-Dask](01-generator.ipynb) | [Nuclio](pipelines/nuclio-data-preperations.ipynb)]
Turning the device's metrics stream into a feature vector using aggregations over multiple timespans (current, minutely, hourly)
**Training**:
\[[Jupyter-Dask](03-training.ipynb) | [Nuclio](pipelines/nuclio-training.ipynb)]
Using the feature vectors from the previous steps, and the **is_error** labels given by the generator, train an ML model (ranging from scikit-learn-based models to XGBoost & TensorFlow).
The model is then saved to a file for future usage.
**Inference**:
\[[Jupyter-Dask](04-infer.ipynb) | [Nuclio](pipelines/nuclio-inference.ipynb)]
Create a Nuclio function based on the model file from the previous step and the feature vectors created by the Preprocessing stage <br>
In this notebook we demonstrate how to take a model written in Python and easily convert it to a serverless function
This function can run in a serving layer as part of a production pipeline. The goal of this model is to predict whether a device is about to fail.
## Jupyter-Dask
- [Generator](01-generator.ipynb)
- [Exploration](02-explore.ipynb)
- [Training](03-training.ipynb)
- [Inference](04-infer.ipynb)
- [Dashboard](05-grafana.ipynb)
## Nuclio
- [Generator](pipelines/nuclio-generator.ipynb)
- [PreProcessing](pipelines/nuclio-data-preperations.ipynb)
- [Training](pipelines/nuclio-training.ipynb)
- [Inference](pipelines/nuclio-inference.ipynb)

# Higher Dimension Visualizations
Run the following code in your Jupyter notebook to import the pandas library and recreate the `pets` DataFrame.
```
#load "pandas" library under the alias "pd"
import pandas as pd
#identify the location of our online data
url = "https://raw.githubusercontent.com/callysto/online-courses/master/CallystoAndDataScience/data/pets-bootstrap.csv"
#read csv file from url and create a dataframe
pets = pd.read_csv(url)
#display the head of the data
pets.head()
```
We learned in previous modules that we can create a scatter plot to evaluate the relationship between two variables.
For example, let's say we want to study the relationship between age of a pet and how long it took for the pet to be adopted.
```
import plotly.express as px
import plotly.io as pio
# Create scatter plot
scatter_pet = px.scatter(pets,
x="Time to Adoption (weeks)",
y="Age (years)",
title="Age (in years) and Time to Adoption (weeks) for each pet",
color ="Species",hover_name="Name")
scatter_pet.show()
```
Suppose now that we are interested in comparing the weight of a pet, and how long it took for the pet to be adopted.
```
# Create scatter plot
scatter_pet2 = px.scatter(pets,
x="Time to Adoption (weeks)",
y="Weight (lbs)",
title="Weight (lbs) and Time to Adoption (weeks) for each pet",
color ="Species",hover_name="Name")
scatter_pet2.show()
```
Although it was relatively easy to create the two plots, it is worth asking: could we compare both age and weight against how long it took for the pets to be adopted, in a single plot? Yes!
In the next section, we will learn how to use the `scatter_3d()` within Plotly to do this.
### Passing three variables.
Run the code below to create a 3D scatter plot of the pets' weight, age and time to adoption, where we will use three variables `x, y, z` such that
`x = 'Weight (lbs)'`
`y = 'Age (years)'`
`z = 'Time to Adoption (weeks)'`
```
# Create 3D scatter plot
fig = px.scatter_3d(pets,
x='Weight (lbs)',
y='Age (years)',
z='Time to Adoption (weeks)',
hover_name="Name",title='Age, weight and time to adoption of pets')
fig.show()
```
Hovering over the points lets us see that Kujo, an 8-year-old pet that weighs 172 lbs and was adopted after 30 weeks, is the pet that took the longest to be adopted.
Read more about 3D scatter plots here https://plotly.com/python/3d-scatter-plots/.
## 4D+ Plots
While we cannot visualize more than three dimensions, we can incorporate more than three variables by incorporating different symbols and colours.
Let's suppose for instance, that we want to identify the gender of the pet in addition to their age, weight and time to adoption.
```
fig = px.scatter_3d(pets,
x='Weight (lbs)',
y='Age (years)',
z='Time to Adoption (weeks)',
color='Gender',
hover_name="Name", title='Age, weight, gender and time to adoption of pets')
fig.show()
```
We can now see that Kujo is a male pet.
Let's add one more dimension by incorporating symbols and let's categorize by species.
```
fig = px.scatter_3d(pets,
x='Weight (lbs)',
y='Age (years)',
z='Time to Adoption (weeks)',
color='Gender',
symbol='Species',
opacity=0.5,
hover_name="Name", title='Species, age, weight, gender and time to adoption of pets')
fig.show()
# Save to HTML file
# pio.write_html(fig,"3D_plus_Scatter_plot_species.html", auto_open=True)
```
We can then see that Kujo is an 8-year old male dog, that weighs 172 lbs, and that it took Kujo 30 weeks to be adopted.
## Surface Plots
Another way we can represent three variables in a plot is by using surface plots.
As before, we will pass `x,y,z` which contain arrays with datapoints to create a 3D surface.
```
import numpy as np
x = np.outer(np.linspace(-2, 2, 30), np.ones(30))
y = x.copy().T # transpose
z = np.cos(x ** 2 + y ** 2)
```
Exercise:
Explore `x,y,z` by printing its contents.
```
print(x)
print(y)
print(z)
```
We can then plot them using the `Surface` function from `plotly.graph_objects`.
```
import plotly.graph_objects as go
trace = go.Surface(x = x, y = y, z =z )
data = [trace]
layout = go.Layout(title = '3D Surface plot')
fig = go.Figure(data = data, layout = layout)  # pass the layout so the title is used
fig.show()
# Write to HTML file
#pio.write_html(fig,"surface_plot3d.html")
```
### Read more
3D Parametric Plots https://plotly.com/python/v3/3d-parametric-plots/
Surface Plots https://plotly.com/python/3d-surface-plots/
3D Scatter Plots https://plotly.com/python/3d-scatter-plots/
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
# Laboratory 02
## Requirements
For the second part of the exercises you will need the `wikipedia` package. On Windows machines, use the following command in the Anaconda Prompt (`Start --> Anaconda --> Anaconda Prompt`):
conda install -c conda-forge wikipedia
This command should work with other Anaconda environments (OSX, Linux).
If you are using virtualenv directly instead of Anaconda, the following command installs it in your virtualenv:
pip install wikipedia
or
sudo pip install wikipedia
installs it system-wide.
You are encouraged to reuse functions that you defined in earlier exercises.
## 1.1 Define a function that takes a sequence as its input and returns whether the sequence is symmetric. A sequence is symmetric if it is equal to its reverse.
```
def is_symmetric(l):
# TODO
assert(is_symmetric([1]) == True)
assert(is_symmetric([]) == True)
assert(is_symmetric([1, 2, 3, 1]) == False)
assert(is_symmetric([1, "foo", "bar", "foo", 1]) == True)
assert(is_symmetric("abcba") == True)
```
## 1.2 Define a function that takes a sequence and an integer $k$ as its input and returns the $k$ largest elements. Do not use the built-in `max` function. Do not change the original sequence. If $k$ is not specified, return one element in a list.
```
def k_largest(l, k=1):
pass
l = [-1, 0, 3, 2]
assert(k_largest(l) == [3])
assert(k_largest(l, 2) == [2, 3] or k_largest(l, 2))
```
## \*1.3 Add an optional `key` argument that works analogously to the built-in `sorted`'s key argument.
## 1.4 Define a function that takes a matrix as an input represented as a list of lists (you can assume that the input is a valid matrix). Return its transpose without changing the original matrix.
```
def transpose(M):
# TODO
m1 = [[1, 2, 3], [4, 5, 6]]
m2 = [[1, 4], [2, 5], [3, 6]]
assert(transpose(m1) == m2)
assert(transpose(transpose(m1)) == m1)
```
## 2.1 Define a function that takes a string as its input and return a dictionary with the character frequencies.
```
def char_freq(s):
# TODO
assert(char_freq("aba") == {"a": 2, "b": 1})
```
## 2.2 Add an optional `skip_symbols` argument to the `char_freq` function. `skip_symbols` is the set of symbols that should be excluded from the frequency dictionary. If this argument is not specified, the function should include every symbol.
```
def char_freq_with_skip(s, skip_symbols=None):
# TODO
assert(char_freq_with_skip("ab.abc?", skip_symbols=".?") == {"a": 2, "b": 2, "c": 1})
```
## 2.2 Define a function that computes word frequencies in a text.
```
def word_freq(s):
# TODO
s = "the green tea and the black tea"
assert(word_freq(s) == {"the": 2, "tea": 2, "green": 1, "black": 1, "and": 1})
```
## 2.3 Define a function that count the uppercase letters in a string.
```
def count_upper_case(s):
# TODO
assert(count_upper_case("A") == 1)
assert(count_upper_case("abA bcCa") == 2)
```
## 2.4 Define a function that takes two strings and decides whether they are anagrams. A string is an anagram of another string if its letters can be rearranged so that it equals the other string.
For example:
```
abc -- bac
aabb -- abab
```
Counter examples:
```
abc -- aabc
abab -- aaab
```
```
def anagram(s1, s2):
# TODO
assert(anagram("abc", "bac") == True)
assert(anagram("aabb", "abab") == True)
assert(anagram("abab", "aaab") == False)
```
## 2.5. Define a sentence splitter function that takes a string and splits it into a list of sentences. Sentences end with `.` and the new sentence must start with a whitespace (`str.isspace`) or be the end of the string. See the examples below.
```
def sentence_splitter(s):
# TODO
assert(sentence_splitter("A.b. acd.") == ['A.b', 'acd'])
assert(sentence_splitter("A. b. acd.") == ['A', 'b', 'acd'])
```
## Wikipedia module
The following exercises use the `wikipedia` package. The basic usage is illustrated below.
The documentation is available [here](https://pypi.python.org/pypi/wikipedia/).
Searching for pages:
```
import wikipedia
results = wikipedia.search("Budapest")
results
```
Downloading an article:
```
article = wikipedia.page("Budapest")
article.summary[:100]
```
The content attribute contains the full text:
```
type(article.content), len(article.content)
```
By default the module downloads the English Wikipedia. The language can be changed the following way:
```
wikipedia.set_lang("fr")
wikipedia.search("Budapest")
fr_article = wikipedia.page("Budapest")
fr_article.summary[:100]
```
## 3.0 Change the language back to English and test the package with a few other pages.
## 3.1 Download 4-5 arbitrary pages from the English Wikipedia (they should exceed 100000 characters combined) and compute the word frequencies using your previously defined function(s). Print the most common 20 words in the following format (the example is not the correct answer):
```
unintelligent <TAB> 123456
moribund <TAB> 123451
...
```
The words and their frequency are separated by TABS and no additional whitespace should be added.
## 3.2 Repeat the same exercise for your native language if it denotes word boundaries with spaces. If it doesn't choose an arbitrary language other than English.
## 3.3 Define a function that takes a string and returns its bigram frequencies as a dictionary.
Character bigrams are pairs of subsequent characters. For example word `apple` contains the following bigrams: `ap, pp, pl, le`.
They are used for language modeling.
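A quick illustration of the definition (not a full solution to 3.3):
```
word = "apple"
bigrams = [a + b for a, b in zip(word, word[1:])]
print(bigrams)  # ['ap', 'pp', 'pl', 'le']
```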
## 3.4 Using your previous English collection compute bigram frequencies.
What are the 10 most common and 10 least common bigrams?
## \*3.5 Define a function that takes two parameters: a string and an integer N and returns the N-gram frequencies of the string. For $N=2$ the function works the same as in the previous example.
Try the function for $N=1..5$. How many unique N-grams are in your collection?
## 3.6 Compute the same statistics for your native language.
# Automatic differentiation with `autograd`
We train models to get better and better as a function of experience. Usually, getting better means minimizing a loss function. To achieve this goal, we often iteratively compute the gradient of the loss with respect to the weights and then update the weights accordingly. While the gradient calculations are straightforward through the chain rule, working them out by hand can be a pain for complex models.
Before diving deep into the model training, let's go through how MXNet’s `autograd` package expedites this work by automatically calculating derivatives.
## Basic usage
Let's first import the `autograd` package.
```
from mxnet import nd
from mxnet import autograd
```
As a toy example, let’s say that we are interested in differentiating a function $f(x) = 2 x^2$ with respect to parameter $x$. We can start by assigning an initial value of $x$.
```
x = nd.array([[1, 2], [3, 4]])
x
```
Once we compute the gradient of $f(x)$ with respect to $x$, we’ll need a place to store it. In MXNet, we can tell an NDArray that we plan to store a gradient by invoking its `attach_grad` method.
```
x.attach_grad()
```
Now we’re going to define the function $y=f(x)$. To let MXNet store $y$, so that we can compute gradients later, we need to put the definition inside a `autograd.record()` scope.
```
with autograd.record():
y = 2 * x * x
```
Let’s invoke back propagation (backprop) by calling `y.backward()`. When $y$ has more than one entry, `y.backward()` is equivalent to `y.sum().backward()`: MXNet implicitly sums the output into a scalar before computing the gradients.
```
y.backward()
```
Now, let’s see if this is the expected output. Note that $y=2x^2$ and $\frac{dy}{dx} = 4x$, which should be `[[4, 8],[12, 16]]`. Let's check the automatically computed results:
```
x.grad
```
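To make the note about multi-entry outputs concrete, here is a quick check (a sketch reusing the same `x`) that summing before calling `backward` yields the same gradient:
```
with autograd.record():
    y = 2 * x * x
y.sum().backward()
x.grad  # same result: [[4, 8], [12, 16]]
```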
## Using Python control flows
Sometimes we want to write dynamic programs where the execution depends on some real-time values. MXNet will record the execution trace and compute the gradient as well.
Consider the following function `f`: it doubles its input until its `norm` reaches 1000, then selects one element depending on the sign of the sum of its elements.
```
def f(a):
b = a * 2
while b.norm().asscalar() < 1000:
b = b * 2
if b.sum().asscalar() >= 0:
c = b[0]
else:
c = b[1]
return c
```
We record the trace and feed in a random value:
```
a = nd.random.uniform(shape=2)
a.attach_grad()
with autograd.record():
c = f(a)
c.backward()
```
We know that `b` is a linear function of `a`, and `c` is chosen from `b`. The gradient with respect to `a` will then be either `[c/a[0], 0]` or `[0, c/a[1]]`, depending on which element of `b` we picked. Let's check the results:
```
[a.grad, c/a]
```
```
# importing the required libraries
library(keras)
library(abind)
library(grid)
# loading the data and reshaping it
mnist <- dataset_fashion_mnist()
x_train <- mnist$train$x/255
x_test <- mnist$test$x/255
x_train <- array_reshape(x_train, c(nrow(x_train), 784), order = "F")
x_test <- array_reshape(x_test, c(nrow(x_test), 784), order = "F")
# defining the network parameters
batch_size <- 100L
input_dim <- 784L
latent_dim <- 2L
epochs <- 10
# input layer and hidden layer of the encoder part of the VAE
input <- layer_input(shape = c(input_dim))
x <- input %>% layer_dense(units = 256, activation = "relu")
# defining dense layers representing the mean and log standard deviation of the latent distribution
# mean of latent distribution
z_mean <- x %>% layer_dense(units = latent_dim,name = "mean")
# log variance of latent distribution
z_log_sigma <- x %>% layer_dense(units = latent_dim,name = "sigma")
# sampling function to sample points from latent space
sampling <- function(arg) {
z_mean <- arg[, 1:(latent_dim)]
z_log_var <- arg[, (latent_dim + 1):(2 * latent_dim)]
epsilon <- k_random_normal(shape = list(k_shape(z_mean)[1], latent_dim),
mean = 0, stddev = 1)
    z_mean + k_exp(z_log_var) * epsilon  # use the slice extracted above rather than the global tensor
}
# generating a random point from the latent distribution
z <- layer_concatenate(list(z_mean, z_log_sigma)) %>% layer_lambda(sampling)
# hidden layers of the decoder part of VAE
x_1 <- layer_dense(units = 256, activation = "relu")
x_2 <- layer_dense(units = input_dim, activation = "sigmoid")
# decoder output
vae_output <- x_2(x_1(z))
# building the variational autoencoder model
vae <- keras_model(input, vae_output)
summary(vae)
# building separate encoder model that maps inputs to latent space
encoder <- keras_model(input, c(z_mean,z_log_sigma))
summary(encoder)
# building separate decoder model
# Decoder input
decoder_input <- layer_input(k_int_shape(z)[-1])
# Decoder hidden layers
decoder_output <- x_2(x_1(decoder_input))
# Decoder
decoder <- keras_model(decoder_input,decoder_output)
summary(decoder)
# defining loss function
vae_loss <- function(x, decoded_output){
reconstruction_loss <- (input_dim/1.0)*loss_binary_crossentropy(x, decoded_output)
kl_loss <- -0.5*k_mean(1 + z_log_sigma - k_square(z_mean) - k_exp(z_log_sigma), axis = -1L)
reconstruction_loss + kl_loss
}
# compiling the model
vae %>% compile(optimizer = "rmsprop", loss = vae_loss)
# training the model
vae %>% fit(
x_train, x_train,
shuffle = TRUE,
epochs = epochs,
batch_size = batch_size,
validation_data = list(x_test, x_test)
)
# generating sample images
random_distribution = array(rnorm(n = 20,mean = 0,sd = 4),dim = c(10,2))
predicted = array_reshape(predict(decoder,matrix(c(0,0),ncol=2)),dim = c(28,28))
for(i in seq(1,nrow(random_distribution))){
one_pred = predict(decoder,matrix(random_distribution[i,],ncol=2))
predicted = abind(predicted,array_reshape(one_pred,dim = c(28,28)),along = 2)
}
options(repr.plot.width=10, repr.plot.height=1)
grid.raster(predicted,interpolate=FALSE)
```
```
import os
import cv2
import math
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, fbeta_score
from keras import optimizers
from keras import backend as K
from keras.models import Sequential, Model, load_model
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, Activation, BatchNormalization, GlobalAveragePooling2D, Input
# Set seeds to make the experiment more reproducible.
from tensorflow import set_random_seed
from numpy.random import seed
set_random_seed(0)
seed(0)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
train = pd.read_csv('../input/imet-2019-fgvc6/train.csv')
labels = pd.read_csv('../input/imet-2019-fgvc6/labels.csv')
test = pd.read_csv('../input/imet-2019-fgvc6/sample_submission.csv')
train["attribute_ids"] = train["attribute_ids"].apply(lambda x:x.split(" "))
train["id"] = train["id"].apply(lambda x: x + ".png")
test["id"] = test["id"].apply(lambda x: x + ".png")
print('Number of train samples: ', train.shape[0])
print('Number of test samples: ', test.shape[0])
print('Number of labels: ', labels.shape[0])
display(train.head())
display(labels.head())
```
### Model parameters
```
# Model parameters
BATCH_SIZE = 128
HEIGHT = 156
WIDTH = 156
CANAL = 3
N_CLASSES = labels.shape[0]
train_datagen=ImageDataGenerator(rescale=1./255)
train_generator=train_datagen.flow_from_dataframe(
dataframe=train,
directory="../input/imet-2019-fgvc6/train",
x_col="id",
y_col="attribute_ids",
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical",
target_size=(HEIGHT, WIDTH),
subset='training')
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_dataframe(
dataframe=test,
directory = "../input/imet-2019-fgvc6/test",
x_col="id",
target_size=(HEIGHT, WIDTH),
batch_size=1,
shuffle=False,
class_mode=None)
```
### Model
```
def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = applications.Xception(weights=None, include_top=False,
input_tensor=input_tensor)
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
final_output = Dense(n_out, activation='sigmoid', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
weights_paths = ['../input/imet-xcecption-pretrained/Xcecption_fold1.h5', '../input/imet-xcecption-pretrained/Xcecption_fold2.h5',
'../input/imet-xcecption-pretrained/Xcecption_fold3.h5', '../input/imet-xcecption-pretrained/Xcecption_fold4.h5',
'../input/imet-xcecption-pretrained/Xcecption_fold5.h5']
model = create_model(input_shape=(HEIGHT, WIDTH, CANAL), n_out=N_CLASSES)
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
preds = np.zeros((test_generator.n, N_CLASSES))  # accumulator for averaging the fold predictions
n_folds = len(weights_paths)
threshold = 0.1
```
### Apply model to test set and output predictions
```
for weights_path in weights_paths:
    model.load_weights(weights_path)
    test_generator.reset()
    # accumulate the predictions of each fold instead of overwriting them
    preds += model.predict_generator(test_generator, steps=STEP_SIZE_TEST)
preds = preds / n_folds
predictions = []
for pred_ar in preds:
valid = []
for idx, pred in enumerate(pred_ar):
if pred > threshold:
valid.append(idx)
if len(valid) == 0:
valid.append(np.argmax(pred_ar))
predictions.append(valid)
filenames = test_generator.filenames
label_map = {train_generator.class_indices[k] : k for k in train_generator.class_indices}
results = pd.DataFrame({'id':filenames, 'attribute_ids':predictions})
results['id'] = results['id'].map(lambda x: str(x)[:-4])
results['attribute_ids'] = results['attribute_ids'].apply(lambda x: list(map(label_map.get, x)))
results["attribute_ids"] = results["attribute_ids"].apply(lambda x: ' '.join(x))
results.to_csv('submission.csv',index=False)
results.head(10)
```
```
"""
Created on Sun Feb 21 2021
@author: Sahand-j
"""
import numpy as np
import pandas as pd
import pandas_datareader,datetime
import pandas_datareader.data as web
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import date
import nltk
nltk.download('vader_lexicon')
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
from nltk.sentiment import SentimentIntensityAnalyzer
from sqlalchemy import create_engine
import yahoo_fin.stock_info as si
import psycopg2
stocks = 'voo,vti,jpm,iipr,vt,vxus,tgt,dfs,schd,dgro,nobl,schb,spy,nsc,sdy,gm,unp,qqq,land,aapl,stor,tsla,amat,avgo'
def format_ticker_list(ticker_list):
"""
Helper method to format string stock input
:@return(list): returns list of stock tickers
"""
return ticker_list.upper().split(',')
def stock_headline_scraper_dict(ticker_list):
"""
    Scrapes new stock headlines from finviz.com; should be run daily to collect new data
    :@return(dict): returns dictionary of stock tickers and their associated news headlines for available dates
"""
website_url = 'https://finviz.com/quote.ashx?t='
news_tables_dict = {}
for ticker in ticker_list:
#URL for each stock
url = website_url + ticker
#requesting url for each ticker
response = urlopen(Request(url=url, headers={'user-agent': 'sentiment-analysis-app'}))
#html parser, using bs4. downloaded the html
html = BeautifulSoup(response,'html')
#the body that contains all the news article links
news_table_html_body = html.find(id = 'news-table')
#each stock is in dict with value corresponding to news table
news_tables_dict.update({ ticker.upper() : news_table_html_body })
return news_tables_dict
ticker_headline_dict = stock_headline_scraper_dict(format_ticker_list(stocks))
def stock_sentiment_df(news_tables_dict):
"""
Takes in dictionary of stock tickers (K)
and their associated headlines (V) from stock_headline_scraper_dict(ticker_list)
    :@return(dataframe): returns df of stock tickers and their associated news headlines, and sentiment score
"""
parsed_data = []
    #iterate over key and value pairs in dict
for ticker, news_tables_dict in news_tables_dict.items():
for row in news_tables_dict.find_all('tr'):
            #title is in anchor tag 'a', retrieving that from bs4 obj row
title = row.a.text
#time stamps have td tags
timestamp = row.td.text
#no date information
if(len(timestamp.split(' ')) == 1):
time = timestamp.split(' ')[0]
#has date info, before time
else:
date = timestamp.split(' ')[0]
time = timestamp.split(' ')[1]
parsed_data.append([ticker,title,date,time])
df = pd.DataFrame(parsed_data,columns=['ticker','title','date','time'])
    #compound score for each article title
vader = SentimentIntensityAnalyzer()
df['comp_score'] = df['title'].apply(lambda title : vader.polarity_scores(title)['compound'])
for i in df.index:
df.at[i, 'time'] = df['time'][i][0:7]
#converting string time col to datetime obj
df['time'] = pd.to_datetime(df['time']).dt.strftime('%H:%M:%S')
df['date'] = pd.to_datetime(df.date).dt.date
#df = df.set_index('date')
df['updated'] = pd.to_datetime('now')
    #filtering neutral news out of df
df = df[df.comp_score != 0]
#columns of interest
df = df[['date','ticker','comp_score','title','updated']]
return df
all_sentiment_df = stock_sentiment_df(ticker_headline_dict)
all_sentiment_df['date'] = pd.to_datetime(all_sentiment_df.date)
all_sentiment_df.set_index('date',inplace=True)
all_sentiment_df.head()
def all_stocks_and_senti_df(ticker_headline_dict):
"""
Inputs dict from stock_headline_scraper_dict()
    @return(dataframe): df of all stocks in portfolio, and their available sentiment score per day
"""
all_sentiment_df = stock_sentiment_df(ticker_headline_dict)
mean_df = all_sentiment_df.groupby(['ticker','date']).mean()
mean_df = mean_df.unstack()
mean_df = mean_df.xs(key='comp_score',axis = 1).transpose()
return mean_df
total_df = all_stocks_and_senti_df(ticker_headline_dict)
total_df.head(3)
def stock_prices_dict(ticker_list):
"""
Inputs formatted ticker list, and creates dict of stock tickers (K) and their price dataframe (V)
:@return(dict): returns dictionary of stock tickers and their associated available historic price data
"""
dict_of_dfs = {}
for i in ticker_list:
temp_df = si.get_data(i)
temp_df['rolling_mean'] = temp_df['adjclose'].rolling(round(len(temp_df)*.15)).mean()
temp_df['rolling_std'] = temp_df['adjclose'].rolling(round(len(temp_df)*.15)).std()
temp_df['cumel_return'] = (1 + temp_df['adjclose'].pct_change(1)).cumprod()
temp_df['updated'] = pd.to_datetime('now')
dict_of_dfs.update({i.upper() : temp_df})
return dict_of_dfs
def update_all_stocks_price(formatted_ticker_list):
"""
Inputs formatted ticker list, inserts all stocks and their price history into db
:@return(string): confirmation message that query executed
"""
engine = create_engine('postgresql://postgres:postgres@localhost:5432/Stocks')
stock_df_and_ticker_dict = stock_prices_dict(formatted_ticker_list)
    #dropping table and inserting new info
if engine.has_table('total_stock_prices') == True:
engine.execute('DROP TABLE total_stock_prices CASCADE;')
for i in formatted_ticker_list:
stock_df_and_ticker_dict.get(i).to_sql('total_stock_prices', engine, if_exists='append')
return 'all stocks price data is updated in Stocks database'
update_all_stocks_price(format_ticker_list(stocks))
def update_my_portfolio_data_to_db(stock_list):
"""
inputs ticker list, and updates db with new stock headlines relating to current portfolio
:@return(string): confirmation message that query executed
"""
engine = create_engine('postgresql://postgres:postgres@localhost:5432/Stocks')
df = stock_sentiment_df(stock_headline_scraper_dict(format_ticker_list(stock_list)))
if engine.has_table('new_data_with_old') == True:
engine.execute('DROP TABLE new_data_with_old CASCADE;')
df.to_sql('new_data_with_old', engine, if_exists='replace')
else:
df.to_sql('new_data_with_old', engine, if_exists='replace')
view_query = """
create view new_sentiment_data_view as
select
new_data_with_old.date,
new_data_with_old.ticker,
new_data_with_old.comp_score,
new_data_with_old.title,
new_data_with_old.updated
from new_data_with_old
left join main_stock_sentiment_data ON
new_data_with_old.date = main_stock_sentiment_data.date AND
new_data_with_old.ticker = main_stock_sentiment_data.ticker AND
new_data_with_old.title = main_stock_sentiment_data.title
WHERE main_stock_sentiment_data.date IS null; """
add_new_vals_to_senti_table_query = """
insert into main_stock_sentiment_data
select *
from new_sentiment_data_view;
"""
engine.execute(view_query);
engine.execute(add_new_vals_to_senti_table_query);
return 'Updated current stock headlines'
update_my_portfolio_data_to_db(stocks)
def count_num_sentiment_per_stock(stocks):
"""
    Helper method to count the number of sentiment scores collected for each stock ticker
@return(dataframe) df of stock ticker and the number of accumulated sentiment scores data
"""
concat_cases = ''
sent_case_per_ticker = """
SUM(CASE ticker
WHEN '{ticker}'
THEN 1 ELSE 0 END)
AS {ticker_count_col} """
#building string to format as query
for i in format_ticker_list(stocks):
        if(i == format_ticker_list(stocks)[-1]):
            #last ticker: no trailing comma before FROM
            concat_cases += sent_case_per_ticker.format(ticker= i , ticker_count_col= i)
        else:
            concat_cases += sent_case_per_ticker.format(ticker= i , ticker_count_col= i+',')
final_query = 'SELECT' + concat_cases + 'FROM main_stock_sentiment_data;'
engine = create_engine('postgresql://postgres:postgres@localhost:5432/Stocks')
df = pd.read_sql_query(final_query,con=engine).T
df.rename(columns = {'0': 'Sentiment_data_count'},inplace=True)
df.columns = ['Sentiment_data_count']
return df
count_num_sentiment_per_stock(stocks).head()
def group_sentiments_df():
"""
Queries a group by statement from db to average sentiment scores per day per stock
@return(dataframe) of stock ticker and combined sentiment score per day
"""
engine = create_engine('postgresql://postgres:postgres@localhost:5432/Stocks')
grouping_senti = """
select date, ticker, avg(comp_score)
from main_stock_sentiment_data
group by date,ticker
order by ticker asc;"""
grouped_senti_data_df = pd.read_sql_query(grouping_senti,con=engine)
return grouped_senti_data_df
group_sentiments_df().head()
def return_stocks_most_senti_data_dict(stocks):
"""
    Inputs stock tickers, used to analyze individual stocks and their combined
daily sentiment score from one data structure
@return(dict) stock tickers (K) and their df with grouped senti score and dates (V)
"""
stock_with_most_data = count_num_sentiment_per_stock(stocks).sort_values(by=['Sentiment_data_count'],ascending=False)[:5]
#print('Current stocks with most sentiment data: ' + str(list(stock_with_most_data.index))+'\n')
dict_of_dfs = {}
for i in list(stock_with_most_data.index):
temp_df = group_sentiments_df()[group_sentiments_df()['ticker'] == i.upper()]
dict_of_dfs.update({i.upper() : temp_df})
return dict_of_dfs
return_stocks_most_senti_data_dict(stocks).get('TSLA').dropna().sort_values(by=['date']).set_index('date').head()
def plot_stock_sentiment_analysis(ticker_list):
"""
    Inputs stock tickers with the most sentiment scores, used to plot correlations and graphs of sentiment correlations
@return(plots) plots senti score vs adj closed price of associated stock, and correlations
"""
#array version of stocks tickers
stock_list = format_ticker_list(ticker_list)
#string version of stocks
all_stocks_price_dict = return_stocks_most_senti_data_dict(ticker_list)
for i in stock_list:
temp_stock_senti_df = all_stocks_price_dict.get(i).sort_values(by=['date']).set_index('date')
temp_stock_df = stock_prices_dict(stock_list).get(i)
temp_stock_complete_df = temp_stock_df.join(temp_stock_senti_df['avg'], how='inner')
fig=plt.figure(figsize=(20,8))
x = temp_stock_complete_df.index
y1 = temp_stock_complete_df['adjclose']
y2 = temp_stock_complete_df['avg']
ax1 = plt.subplot(221)
ax4 = plt.subplot(222)
ax2 = plt.subplot(223, sharex = ax1)
ax3 = plt.subplot(224)
corr = temp_stock_complete_df[['avg','adjclose','cumel_return','volume']].corr()
ax = sns.heatmap(
corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True, ax = ax3)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right');
sns.regplot(y = temp_stock_complete_df['avg'],x = temp_stock_complete_df['adjclose'], ax=ax4);
ax1.plot(x,y1,label='adj close')
ax2.plot(x,y2,label='avg sentiment')
ax1.set_title('adj close vs sentiment score')
fig.suptitle(str(i));
ax1.legend();
ax2.legend();
# currently 'tsla,aapl,tgt,jpm,gm' are the stocks we have the most data for.
print('Stocks with most data: ' + str(list(return_stocks_most_senti_data_dict(stocks).keys())))
stocks_with_most_data = ",".join(list(return_stocks_most_senti_data_dict(stocks).keys()))
#plot statistics
plot_stock_sentiment_analysis(stocks_with_most_data)
```
# Creating a new process - Orographic precipitation

```
import numpy as np
import matplotlib.pyplot as plt
import xsimlab as xs # modeling framework used for FastScape development
import xarray as xr # xarray is a python package to work with labelled multi-dimensional arrays
%load_ext xsimlab.ipython
```
We have developed an [orographic precipitation model](https://github.com/fastscape-lem/orographic-precipitation) based on [Smith and Barstad (2004)](https://journals.ametsoc.org/view/journals/atsc/61/12/1520-0469_2004_061_1377_altoop_2.0.co_2.xml) that can easily be included in any landscape evolution model.
We will use it to demonstrate how new processes can be constructed and added to FastScape.
Note that the orographic model can be installed through conda using ```conda install orographic_precipitation -c conda-forge```
```
from orographic_precipitation import compute_orographic_precip
```
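As a quick standalone illustration (independent of the FastScape process defined next), the function can be applied to any elevation array. The synthetic Gaussian hill and the parameter values below are assumptions for illustration only, mirroring those used later in the ```Orography``` process; the cell reuses the numpy/matplotlib imports above.
```
# Minimal sketch: orographic precipitation over a synthetic Gaussian hill
# (all values below are illustrative assumptions)
nx, ny, dx, dy = 201, 101, 1000., 1000.
x, y = np.meshgrid(np.arange(nx) * dx, np.arange(ny) * dy)
hill = 1500. * np.exp(-((x - x.mean())**2 + (y - y.mean())**2) / (2 * 20e3**2))
lapse_rate = -5.8
lapse_rate_m = -6.5
ref_density = 7.4e-3
param = {
    'latitude': 40,
    'precip_base': 7,       # uniform precipitation rate
    'wind_speed': 10,
    'wind_dir': 270,        # wind direction (270: west)
    'conv_time': 1000,      # conversion time
    'fall_time': 1000,      # fallout time
    'nm': 0.005,            # moist stability frequency
    'hw': 5000,             # water vapor scale height
    'cw': ref_density * lapse_rate_m / lapse_rate  # uplift sensitivity
}
precip = compute_orographic_precip(hill, dx, dy, **param)
plt.imshow(precip, origin='lower')
plt.colorbar(label='precipitation')
```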
We now develop a very simple process called ```Orography``` that simply takes wind direction as an input and produces a precipitation pattern over a given landscape (topography) of known dimensions
```
from fastscape.processes import RasterGrid2D, SurfaceTopography, FlowAccumulator
@xs.process
class Orography:
wind_dir = xs.variable(intent='in', description='wind direction (0 is north)')
precip = xs.foreign(FlowAccumulator, 'runoff', intent='out')
dx = xs.foreign(RasterGrid2D, 'dx')
dy = xs.foreign(RasterGrid2D, 'dy')
h = xs.foreign(SurfaceTopography, 'elevation')
@xs.runtime()
def run_step(self):
lapse_rate = -5.8
lapse_rate_m = -6.5
ref_density = 7.4e-3
param = {
'latitude': 40,
'precip_base': 7, # uniform precipitation rate
'wind_speed': 10,
'wind_dir': self.wind_dir, # wind direction (270: west)
'conv_time': 1000, # conversion time
'fall_time': 1000, # fallout time
'nm': 0.005, # moist stability frequency
'hw': 5000, # water vapor scale height
'cw': ref_density * lapse_rate_m / lapse_rate # uplift sensitivity
}
self.precip = compute_orographic_precip(self.h, self.dx, self.dy, **param)
from fastscape.models import basic_model
basic_model.drainage
```
We improve the basic_model by substituting the computation of drainage area with a FlowAccumulator, which transforms any precipitation field into an accumulated flow, and by adding our Orography process, which takes a wind direction and produces a precipitation field.
```
spl_model = basic_model.update_processes({'drainage': FlowAccumulator, 'orography': Orography})
```
Let's explore this new model
```
spl_model.drainage
```
We run the model with a wind direction from the South
```
# %create_setup spl_model --default --verbose
import xsimlab as xs
ds_in = xs.create_setup(
model=spl_model,
clocks={'time': np.linspace(0,2e7,201),
'out': np.linspace(0, 2e7, 21)},
master_clock = 'time',
input_vars={
# nb. of grid nodes in (y, x)
'grid__shape': [101,201],
# total grid length in (y, x)
'grid__length': [1e5,2e5],
# node status at borders
'boundary__status': 'fixed_value',
# uplift rate
'uplift__rate': 1e-3,
# random seed
'init_topography__seed': None,
# diffusivity (transport coefficient)
'diffusion__diffusivity': 1,
# bedrock channel incision coefficient
'spl__k_coef': 1e-5,
# drainage area exponent
'spl__area_exp': 0.4,
# slope exponent
'spl__slope_exp': 1,
# wind direction (0 is north)
'orography__wind_dir': 180,
},
output_vars={
'topography__elevation': 'out',
'drainage__flowacc': 'out',
'drainage__runoff': 'out'}
)
with xs.monitoring.ProgressBar():
ds_out = ds_in.xsimlab.run(model=spl_model)
from ipyfastscape import TopoViz3d
app = TopoViz3d(ds_out, canvas_height=600, time_dim="out")
app.components['background_color'].set_color('lightgray')
app.components['vertical_exaggeration'].set_factor(5)
app.components['timestepper'].go_to_time(ds_out.out[-1])
app.show()
```
# 3D map simulation
## Prerequisites
- Knowledge of 3D extraction and datasets used in gammapy, see for instance the [first analysis tutorial](../../starting/analysis_1.ipynb)
## Context
To simulate a specific observation, it is not always necessary to simulate the full photon list. For many use cases, directly simulating a reduced, binned dataset is enough: the IRFs, reduced in the correct geometry, are combined with a source model to predict the expected number of counts per bin. The latter is then used to simulate a reduced dataset by drawing from a Poisson probability distribution.
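As a minimal illustration of that last step (with made-up numbers rather than gammapy objects), the predicted counts are simply Poisson-fluctuated bin by bin:
```
# Sketch: Poisson-fluctuate a map of predicted counts (illustrative values only)
import numpy as np
rng = np.random.default_rng(0)
npred = np.array([[0.5, 2.0], [10.0, 4.2]])  # expected counts per bin
counts = rng.poisson(npred)                  # one simulated realization
```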
This can be done to check the feasibility of a measurement (performance / sensitivity study), to test whether fitted parameters really provide a good fit to the data etc.
Here we will see how to perform a 3D simulation of a CTA observation, assuming both the spectral and spatial morphology of an observed source.
**Objective: simulate a 3D observation of a source with CTA using the CTA 1DC response and fit it with the assumed source model.**
## Proposed approach:
Here we can't use the regular observation objects that are connected to a `DataStore`. Instead we will create a fake `~gammapy.data.Observation` that contains the pointing information and the CTA 1DC IRFs (that are loaded with `~gammapy.irf.load_cta_irfs`).
Then we will define a `~gammapy.datasets.MapDataset` geometry and fill it with the `~gammapy.makers.MapDatasetMaker`.
Then we will be able to define a model consisting of a `~gammapy.modeling.models.PowerLawSpectralModel` and a `~gammapy.modeling.models.GaussianSpatialModel`. We will assign it to the dataset and fake the count data.
## Imports and versions
```
%matplotlib inline
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from gammapy.irf import load_cta_irfs
from gammapy.maps import WcsGeom, MapAxis
from gammapy.modeling.models import (
PowerLawSpectralModel,
GaussianSpatialModel,
SkyModel,
Models,
FoVBackgroundModel,
)
from gammapy.makers import MapDatasetMaker, SafeMaskMaker
from gammapy.modeling import Fit
from gammapy.data import Observation
from gammapy.datasets import MapDataset
!gammapy info --no-envvar --no-dependencies --no-system
```
## Simulation
We will simulate using the CTA-1DC IRFs shipped with gammapy. Note that for dedicated CTA simulations, you can simply use `Observation.from_caldb()` without having to externally load the IRFs.
```
# Loading IRFs
irfs = load_cta_irfs(
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
# Define the observation parameters (typically the observation duration and the pointing position):
livetime = 2.0 * u.hr
pointing = SkyCoord(0, 0, unit="deg", frame="galactic")
# Define map geometry for binned simulation
energy_reco = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 10), unit="TeV", name="energy", interp="log"
)
geom = WcsGeom.create(
skydir=(0, 0),
binsz=0.02,
width=(6, 6),
frame="galactic",
axes=[energy_reco],
)
# It is usually useful to have a separate binning for the true energy axis
energy_true = MapAxis.from_edges(
np.logspace(-1.5, 1.5, 30), unit="TeV", name="energy", interp="log"
)
empty = MapDataset.create(geom, name="dataset-simu")
# Define the sky model used to simulate the data.
# Here we use a Gaussian spatial model and a Power Law spectral model.
spatial_model = GaussianSpatialModel(
lon_0="0.2 deg", lat_0="0.1 deg", sigma="0.3 deg", frame="galactic"
)
spectral_model = PowerLawSpectralModel(
index=3, amplitude="1e-11 cm-2 s-1 TeV-1", reference="1 TeV"
)
model_simu = SkyModel(
spatial_model=spatial_model,
spectral_model=spectral_model,
name="model-simu",
)
bkg_model = FoVBackgroundModel(dataset_name="dataset-simu")
models = Models([model_simu, bkg_model])
print(models)
```
Now comes the main part of dataset simulation. We create an in-memory observation and an empty dataset. We then predict the number of counts for the given model, and Poisson fluctuate it using `fake()` to make a simulated counts map. Keep in mind that it is important to specify the `selection` of the maps that you want to produce.
```
# Create an in-memory observation
obs = Observation.create(pointing=pointing, livetime=livetime, irfs=irfs)
print(obs)
# Make the MapDataset
maker = MapDatasetMaker(selection=["exposure", "background", "psf", "edisp"])
maker_safe_mask = SafeMaskMaker(methods=["offset-max"], offset_max=4.0 * u.deg)
dataset = maker.run(empty, obs)
dataset = maker_safe_mask.run(dataset, obs)
print(dataset)
# Add the model to the dataset and Poisson fluctuate
dataset.models = models
dataset.fake()
# Do a print on the dataset - there is now a counts maps
print(dataset)
```
Now use this dataset as you would in all standard analysis. You can plot the maps, or proceed with your custom analysis.
In the next section, we show the standard 3D fitting as in [analysis_3d](analysis_3d.ipynb).
```
# To plot, eg, counts:
dataset.counts.smooth(0.05 * u.deg).plot_interactive(
add_cbar=True, stretch="linear"
)
```
## Fit
In this section, we do a usual 3D fit with the same model used to simulate the data and check the stability of the simulations. Often, it is useful to simulate many such datasets and look at the distribution of the reconstructed parameters.
```
models_fit = models.copy()
# We do not want to fit the background in this case, so we will freeze the parameters
models_fit["dataset-simu-bkg"].spectral_model.norm.frozen = True
models_fit["dataset-simu-bkg"].spectral_model.tilt.frozen = True
dataset.models = models_fit
print(dataset.models)
%%time
fit = Fit([dataset])
result = fit.run(optimize_opts={"print_level": 1})
dataset.plot_residuals_spatial(method="diff/sqrt(model)", vmin=-0.5, vmax=0.5)
```
Compare the injected and fitted models:
```
print(
"True model: \n",
model_simu,
"\n\n Fitted model: \n",
models_fit["model-simu"],
)
```
Get the errors on the fitted parameters from the parameter table
```
result.parameters.to_table()
```
# Introduction to the Keras Tuner
## Overview
Keras Tuner is a library that helps you pick the optimal set of hyperparameters for your TensorFlow program. The process of selecting the right set of hyperparameters for a machine learning (ML) application is called *hyperparameter tuning* or *hypertuning*.
Hyperparameters are the variables that govern the training process and the topology of an ML model. These variables remain constant over the training process and directly impact the performance of your ML program. Hyperparameters are of two types:
1. **Model hyperparameters** which influence model selection such as the number and width of hidden layers
2. **Algorithm hyperparameters** which influence the speed and quality of the learning algorithm such as the learning rate for Stochastic Gradient Descent (SGD) and the number of nearest neighbors for a k Nearest Neighbors (KNN) classifier
## Setup
```
import tensorflow as tf
from tensorflow import keras
```
Install and import the Keras Tuner.
```
!pip install -q -U keras-tuner
import keras_tuner as kt
```
## Download and prepare the dataset
Use the Keras Tuner to find the best hyperparameters for a machine learning model that classifies images of clothing from the [Fashion MNIST dataset](https://github.com/zalandoresearch/fashion-mnist).
Load the data.
```
(img_train, label_train), (img_test, label_test) = keras.datasets.fashion_mnist.load_data()
# Normalize pixel values between 0 and 1
img_train = img_train.astype('float32') / 255.0
img_test = img_test.astype('float32') / 255.0
```
## Define the model
When building a model for hypertuning, you define the hyperparameter search space in addition to the model architecture. The model you set up for hypertuning is called a *hypermodel*.
Define a hypermodel through one of two approaches:
* By using a model builder function (the approach used in this notebook)
* By subclassing the `HyperModel` class of the Keras Tuner API (a brief sketch follows this list)
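As a brief sketch of the second approach, a subclass only needs a `build(hp)` method; the search space below simply mirrors the `model_builder` function defined in the next cell and is not used further in this tutorial.
```
class SimpleHyperModel(kt.HyperModel):
    """Sketch of a hypermodel equivalent to the model_builder function below."""
    def build(self, hp):
        model = keras.Sequential()
        model.add(keras.layers.Flatten(input_shape=(28, 28)))
        # Tunable number of units, same range as in model_builder
        hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
        model.add(keras.layers.Dense(units=hp_units, activation='relu'))
        model.add(keras.layers.Dense(10))
        # Tunable learning rate, same choices as in model_builder
        hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
        model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                      loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                      metrics=['accuracy'])
        return model

# A tuner accepts an instance in place of a builder function, e.g.:
# tuner = kt.Hyperband(SimpleHyperModel(), objective='val_accuracy', max_epochs=10)
```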
```
def model_builder(hp):
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
# Tune the number of units in the first Dense layer
# Choose an optimal value between 32-512
hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
model.add(keras.layers.Dense(units=hp_units, activation='relu'))
model.add(keras.layers.Dense(10))
# Tune the learning rate for the optimizer
# Choose an optimal value from 0.01, 0.001, or 0.0001
hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
```
## Instantiate the tuner and perform hypertuning
Instantiate the tuner to perform the hypertuning. The Keras Tuner has four tuners available - `RandomSearch`, `Hyperband`, `BayesianOptimization`, and `Sklearn`. [Hyperband](https://arxiv.org/pdf/1603.06560.pdf) tuner is used in this tutorial.
To instantiate the Hyperband tuner, specify the hypermodel, the `objective` to optimize, and the maximum number of epochs to train (`max_epochs`).
```
tuner = kt.Hyperband(model_builder,
objective='val_accuracy',
max_epochs=10,
factor=3,
directory='my_dir',
project_name='intro_to_kt')
```
The Hyperband tuning algorithm uses adaptive resource allocation and early-stopping to quickly converge on a high-performing model. This is done using a sports championship style bracket. The algorithm trains a large number of models for a few epochs and carries forward only the top-performing half of models to the next round. Hyperband determines the number of models to train in a bracket by computing 1 + log<sub>`factor`</sub>(`max_epochs`) and rounding it up to the nearest integer.
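A quick check of that computation for the values used above (`factor=3`, `max_epochs=10`):
```
import math
factor, max_epochs = 3, 10  # values passed to kt.Hyperband above
print(math.ceil(1 + math.log(max_epochs, factor)))  # -> 4
```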
Create a callback to stop training early after reaching a certain value for the validation loss.
```
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
```
Run the hyperparameter search. The arguments for the search method are the same as those used for `tf.keras.Model.fit`, in addition to the callback above.
```
tuner.search(img_train, label_train, epochs=50, validation_split=0.2, callbacks=[stop_early])
# Get the optimal hyperparameters
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
print(f"""
The hyperparameter search is complete. The optimal number of units in the first densely-connected
layer is {best_hps.get('units')} and the optimal learning rate for the optimizer
is {best_hps.get('learning_rate')}.
""")
```
## Train the model
Find the optimal number of epochs to train the model with the hyperparameters obtained from the search.
```
# Build the model with the optimal hyperparameters and train it on the data for 50 epochs
model = tuner.hypermodel.build(best_hps)
history = model.fit(img_train, label_train, epochs=50, validation_split=0.2)
val_acc_per_epoch = history.history['val_accuracy']
best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1
print('Best epoch: %d' % (best_epoch,))
```
Re-instantiate the hypermodel and train it with the optimal number of epochs from above.
```
hypermodel = tuner.hypermodel.build(best_hps)
# Retrain the model
hypermodel.fit(img_train, label_train, epochs=best_epoch, validation_split=0.2)
```
To finish this tutorial, evaluate the hypermodel on the test data.
```
eval_result = hypermodel.evaluate(img_test, label_test)
print("[test loss, test accuracy]:", eval_result)
```
The `my_dir/intro_to_kt` directory contains detailed logs and checkpoints for every trial (model configuration) run during the hyperparameter search. If you re-run the hyperparameter search, the Keras Tuner uses the existing state from these logs to resume the search. To disable this behavior, pass an additional `overwrite=True` argument while instantiating the tuner.
```
# This tells Colab to use TF2
%tensorflow_version 2.x
```
# Test Active Wave Breaking Classifier
Test the classifier.
The data needs to be in a folder called "test" which has sub-folders "0" and "1"
For example:
```
test
├───0
├───1
```
We will need to download a pre-trained model
```
# PROGRAM : test_wave_breaking_classifier.py
# PURPOSE  : classify wave breaking using a convnet
# AUTHOR : Caio Eadi Stringari
# EMAIL : caio.stringari@gmail.com
# V1.0 : 05/05/2020 [Caio Stringari]
```
## Data Download
Load data from my google drive. You may need to copy the data over to yours to use this program.
```
from google.colab import drive
drive.mount('/content/drive')
!cp "/content/drive/My Drive/Colab Notebooks/FEM/data/test.tar.gz" .
!tar -zxf test.tar.gz
```
## Imports
```
import os
import numpy as np
import tensorflow as tf
import pathlib
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
```
## Load the model
```
modelpath = "/content/drive/My Drive/Colab Notebooks/FEM/InceptionResNetV2Baseline.h5"
model = tf.keras.models.load_model(modelpath)
```
## Keras Data Generator
Load the test data.
The augmentation steps are:
1. Rescale in the range [0, 1]
Keras generators will take care of the rest for us.
```
def show_batch(image_batch, label_batch):
fig = plt.figure(figsize=(10, 10))
for n in range(len(image_batch)):
ax = plt.subplot(12, 10, n + 1)
plt.imshow(image_batch[n])
plt.title("class : "+str(int(label_batch[n])))
plt.axis('off')
fig.tight_layout()
# --- test data input ---
test_dir = "test/"
test_dir = pathlib.Path(test_dir)
image_count = len(list(test_dir.glob('*/*')))
class_names = np.array([item.name for item in test_dir.glob('*')])
try:
nclasses = len(class_names)
print(" Found image data, proceeding.\n")
print(" - Classes are {}".format(class_names))
except Exception:
raise IOError("Check your data!")
inp_shape = model.input_shape
img_height = inp_shape[1] # image height for all images
img_width = inp_shape[2] # image width for all images
# tells the Generator when to stop
BATCH_SIZE = 120
datagen = ImageDataGenerator(rescale=1./255.)
print("\n Fitting the teset data generator:\n")
data_gen_test = datagen.flow_from_directory(
directory=str(test_dir), batch_size=BATCH_SIZE, shuffle=False,
target_size=(img_height, img_width), classes=["0", "1"],
class_mode="binary")
image_batch, label_batch = next(data_gen_test)
show_batch(image_batch, label_batch)
```
## Predict new labels
```
# predict on the test data
ytrue = []
yhat = []
for step in range(data_gen_test.n // BATCH_SIZE):
print("step {} of {}".format(step+1, data_gen_test.n // BATCH_SIZE), end="\r")
X, y = data_gen_test.next()
yh = model.predict(X)
for i, j in zip(np.squeeze(y), np.squeeze(yh)):
ytrue.append(i)
yhat.append(j)
# predicted labels
TRX = 0.5
yhat = np.squeeze(yhat)
ypred = np.zeros(yhat.shape)
ypred[yhat > TRX] = 1
print(classification_report(ytrue, ypred))
```
# Introduction to Data Analysis
This notebook serves as a summary of the fundamentals covered in chapter 1. For a Python crash-course/refresher, work through the [`python_101.ipynb`](./python_101.ipynb) notebook.
## Setup
```
from visual_aids import stats_viz
```
## Fundamentals of data analysis
When conducting a data analysis, we will move back and forth between four main processes:
- **Data Collection**: Every analysis starts with collecting data. We can collect data from a variety of sources, including databases, APIs, flat files, and the Internet.
- **Data Wrangling**: After we have our data, we need to prepare it for our analysis. This may involve reshaping it, changing data types, handling missing values, and/or aggregating it.
- **Exploratory Data Analysis (EDA)**: We can use visualizations to explore our data and summarize it. During this time, we will also begin exploring the data by looking at its structure, format, and summary statistics.
- **Drawing Conclusions**: After we have thoroughly explored our data, we can try to draw conclusions or model it.
## Statistical Foundations
As this is not a statistics book, we will discuss the concepts we will need to work through the book, in addition to some avenues for further exploration. By no means is this exhaustive.
### Sampling
Some resampling (sampling from the sample) techniques we will see throughout the book, especially for the chapters on machine learning (9-11):
- **simple random sampling**: pick with a random number generator
- **stratified random sampling**: randomly pick preserving the proportion of groups in the data
- **bootstrapping**: sampling with replacement (more info: [YouTube video](https://www.youtube.com/watch?v=gcPIyeqymOU) and [Wikipedia article](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)))
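A minimal sketch of these three techniques using NumPy and pandas (the population data here is made up purely for illustration):
```
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
population = pd.DataFrame({
    'group': rng.choice(['A', 'B'], size=1_000, p=[0.8, 0.2]),
    'value': rng.normal(size=1_000)
})

# simple random sampling: pick rows with a random number generator
simple = population.sample(n=100, random_state=0)

# stratified random sampling: sample 10% of each group, preserving proportions
stratified = population.groupby('group', group_keys=False).sample(frac=0.1, random_state=0)

# bootstrapping: sample with replacement, same size as the original sample
bootstrap = simple.sample(n=len(simple), replace=True, random_state=0)
```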
### Descriptive Statistics
We use descriptive statistics to describe the data. The data we work with is usually a **sample** taken from the **population**. The statistics we will discuss here are referred to as **sample statistics** because they are calculated on the sample and can be used as estimators for the population parameters.
#### Measures of Center
Three common ways to describe the central tendency of a distribution are mean, median, and mode.
##### Mean
The sample mean is an estimator for the population mean ($\mu$) and is defined as:
$$\bar{x} = \frac{\sum_{1}^{n} x_i}{n}$$
##### Median
The median represents the 50<sup>th</sup> percentile of our data; this means that 50% of the values are greater than the median and 50% are less than the median. It is calculated by taking the middle value from an ordered list of values.
##### Mode
The mode is the most common value in the data. We can use it to describe categorical data or, for continuous data, the shape of the distribution:
```
ax = stats_viz.different_modal_plots()
```
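A quick sketch of computing all three measures of center on a small, made-up sample:
```
import statistics
import numpy as np

data = [1, 2, 2, 3, 3, 3, 4, 5]  # made-up sample

print(np.mean(data))         # 2.875
print(np.median(data))       # 3.0
print(statistics.mode(data)) # 3
```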
#### Measures of Spread
Measures of spread tell us how the data is dispersed; this will indicate how thin (low dispersion) or wide (very spread out) our distribution is.
##### Range
The range is the distance between the smallest value (minimum) and the largest value (maximum):
$$range = max(X) - min(X)$$
##### Variance
The variance describes how far apart observations are spread out from their average value (the mean). When calculating the sample variance, we divide by *n - 1* instead of *n* to account for using the sample mean ($\bar{x}$):
$$s^2 = \frac{\sum_{1}^{n} (x_i - \bar{x})^2}{n - 1}$$
This is referred to as Bessel's correction and is applied to get an unbiased estimator of the population variance.
*Note that this will be in units-squared of whatever was being measured.*
##### Standard Deviation
The standard deviation is the square root of the variance, giving us a measure in the same units as our data. The sample standard deviation is calculated as follows:
$$s = \sqrt{\frac{\sum_{1}^{n} (x_i - \bar{x})^2}{n - 1}} = \sqrt{s^2}$$
```
ax = stats_viz.effect_of_std_dev()
```
*Note that $\sigma^2$ is the population variance and $\sigma$ is the population standard deviation.*
##### Coefficient of Variation
The coefficient of variation (CV) gives us a unitless ratio of the standard deviation to the mean. Since it has no units, we can compare dispersion across datasets:
$$CV = \frac{s}{\bar{x}}$$
##### Interquartile Range
The interquartile range (IQR) gives us the spread of data around the median and quantifies how much dispersion we have in the middle 50% of our distribution:
$$IQR = Q_3 - Q_1$$
##### Quartile Coefficient of Dispersion
The quartile coefficient of dispersion also is a unitless statistic for comparing datasets. However, it uses the median as the measure of center. It is calculated by dividing the semi-quartile range (half the IQR) by the midhinge (midpoint between the first and third quartiles):
$$QCD = \frac{\frac{Q_3 - Q_1}{2}}{\frac{Q_1 + Q_3}{2}} = \frac{Q_3 - Q_1}{Q_3 + Q_1}$$
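A short sketch computing these measures of spread on a made-up sample; note the `ddof=1` argument, which applies Bessel's correction for the sample variance and standard deviation:
```
import numpy as np

data = np.array([1, 2, 2, 3, 3, 3, 4, 5, 9])  # made-up sample

data_range = data.max() - data.min()
variance = data.var(ddof=1)      # sample variance (Bessel's correction)
std_dev = data.std(ddof=1)       # sample standard deviation
cv = std_dev / data.mean()       # coefficient of variation

q1, q3 = np.percentile(data, [25, 75])
iqr = q3 - q1                    # interquartile range
qcd = (q3 - q1) / (q3 + q1)      # quartile coefficient of dispersion
```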
#### Summarizing data
The **5-number summary** provides 5 descriptive statistics that summarize our data:
| | Quartile | Statistic | Percentile |
| --- | --- | --- | --- |
|1.|$Q_0$|minimum|$0^{th}$|
|2.|$Q_1$|N/A|$25^{th}$|
|3.|$Q_2$|median|$50^{th}$|
|4.|$Q_3$|N/A|$75^{th}$|
|5.|$Q_4$|maximum|$100^{th}$|
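The 5-number summary itself is just a set of percentiles, e.g.:
```
import numpy as np

data = np.array([1, 2, 2, 3, 3, 3, 4, 5, 9])  # made-up sample
q0, q1, q2, q3, q4 = np.percentile(data, [0, 25, 50, 75, 100])
```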
This summary can be visualized using a **box plot** (also called box-and-whisker plot). The box has an upper bound of $Q_3$ and a lower bound of $Q_1$. The median will be a line somewhere in this box. The whiskers extend from the box towards the minimum/maximum. For our purposes, they will extend to $Q_3 + 1.5 \times IQR$ and $Q_1 - 1.5 \times IQR$ and anything beyond will be represented as individual points for outliers:
```
ax = stats_viz.example_boxplot()
```
The box plot doesn't show us how the data is distributed within the quartiles. To get a better sense of the distribution, we can use a **histogram**, which will show us the amount of observations that fall into equal-width bins. We can vary the number of bins to use, but be aware that this can change our impression of what the distribution appears to be:
```
ax = stats_viz.example_histogram()
```
We can also visualize the distribution using a **kernel density estimate (KDE)**. This will estimate the **probability density function (PDF)**. This function shows how probability is distributed over the values. Higher values of the PDF mean higher likelihoods:
```
ax = stats_viz.example_kde()
```
Note that both the KDE and histogram estimate the distribution:
```
ax = stats_viz.hist_and_kde()
```
**Skewed distributions** have more observations on one side. The mean will be less than the median with negative skew, while the opposite is true of positive skew:
```
ax = stats_viz.skew_examples()
```
We can use the **cumulative distribution function (CDF)** to find probabilities of getting values within a certain range. The CDF is the integral of the PDF:
$$CDF = F(x) = \int_{-\infty}^{x} f(t) dt$$
*Note that $f(t)$ is the PDF and $\int_{-\infty}^{\infty} f(t) dt = 1$.*
The probability of the random variable $X$ being less than or equal to the specific value of $x$ is denoted as $P(X ≤ x)$. Note that for a continuous random variable the probability of it being exactly $x$ is zero.
Let's look at the estimate of the CDF from the sample data we used for the box plot, called the **empirical cumulative distribution function (ECDF)**:
```
ax = stats_viz.cdf_example()
```
*We can find any range we want if we use some algebra as in the rightmost subplot above.*
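Building an ECDF ourselves is straightforward: sort the observations and step up by 1/n at each one. A minimal sketch on made-up data:
```
import numpy as np

sample = np.random.default_rng(0).normal(size=100)  # hypothetical data

x = np.sort(sample)
ecdf = np.arange(1, len(x) + 1) / len(x)  # ECDF value at each sorted observation

# The ECDF at a point is just the fraction of observations <= that point,
# e.g. an estimate of P(X <= 0):
print(np.mean(sample <= 0))
```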
#### Common Distributions
- **Gaussian (normal) distribution**: looks like a bell curve and is parameterized by its mean (μ) and standard deviation (σ). Many things in nature happen to follow the normal distribution, like heights. Note that testing if a distribution is normal is not trivial. Written as $N(\mu, \sigma)$.
- **Poisson distribution**: discrete distribution that is often used to model arrivals. Parameterized by its mean, lambda (λ). Written as $Pois(\lambda)$.
- **Exponential distribution**: can be used to model the time between arrivals. Parameterized by its mean, lambda (λ). Written as $Exp(\lambda)$.
- **Uniform distribution**: places equal likelihood on each value within its bounds (*a* and *b*). We often use this for random number generation. Written as $U(a, b)$.
- **Bernoulli distribution**: When we pick a random number to simulate a single success/failure outcome, it is called a Bernoulli trial. This is parameterized by the probability of success (*p*). Written as $Bernoulli(p)$.
- **Binomial distribution**: When we run the same experiment *n* times, the total number of successes is then a binomial random variable. Written as $B(n, p)$.
We can visualize both discrete and continuous distributions; however, discrete distributions give us a **probability mass function** (**PMF**) instead of a PDF:
```
ax = stats_viz.common_dists()
```
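To get a feel for these distributions, we can draw samples from each with NumPy's random number generator (the parameter values below are arbitrary):
```
import numpy as np

rng = np.random.default_rng(0)

gaussian    = rng.normal(loc=0, scale=1, size=1000)   # N(0, 1)
poisson     = rng.poisson(lam=3, size=1000)           # Pois(3)
exponential = rng.exponential(scale=1/3, size=1000)   # Exp(3); NumPy's scale is 1/lambda
uniform     = rng.uniform(low=0, high=10, size=1000)  # U(0, 10)
bernoulli   = rng.binomial(n=1, p=0.5, size=1000)     # Bernoulli(0.5)
binomial    = rng.binomial(n=10, p=0.5, size=1000)    # B(10, 0.5)
```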
#### Scaling data
In order to compare variables from different distributions, we would have to scale the data, which we could do with the range by using **min-max scaling**:
$$x_{scaled}=\frac{x - min(X)}{range(X)}$$
Another way is to use a **Z-score** to standardize the data:
$$z_i = \frac{x_i - \bar{x}}{s}$$
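Both transformations are one-liners with NumPy; the values here are made up for illustration:
```
import numpy as np

x = np.array([3, 5, 8, 1, 9, 4])  # hypothetical values

min_max_scaled = (x - x.min()) / (x.max() - x.min())  # maps the data onto [0, 1]
z_scores = (x - x.mean()) / x.std(ddof=1)             # mean 0, standard deviation 1
```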
#### Quantifying relationships between variables
The **covariance** is a statistic for quantifying the relationship between variables by showing how one variable changes with respect to another (also referred to as their joint variance):
$$cov(X, Y) = E[(X-E[X])(Y-E[Y])]$$
*E[X] is the expectation of the random variable X (its long-run average).*
The sign of the covariance gives us the direction of the relationship, but we need the magnitude as well. For that, we calculate the **Pearson correlation coefficient** ($\rho$):
$$\rho_{X, Y} = \frac{cov(X, Y)}{s_X s_Y}$$
Examples:
```
ax = stats_viz.correlation_coefficient_examples()
```
*From left to right: no correlation, weak negative correlation, strong positive correlation, and nearly perfect negative correlation.*
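For reference, we can compute both statistics on a pair of made-up variables with NumPy:
```
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100)          # hypothetical variables with a linear relationship
y = 2 * x + rng.normal(size=100)

print(np.cov(x, y)[0, 1])         # sample covariance of x and y
print(np.corrcoef(x, y)[0, 1])    # Pearson correlation coefficient, between -1 and 1
```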
Often, it is more informative to use scatter plots to check for relationships between variables. This is because the correlation may be strong, but the relationship may not be linear:
```
ax = stats_viz.non_linear_relationships()
```
Remember, **correlation does not imply causation**. While we may find a correlation between X and Y, it does not mean that X causes Y or Y causes X. It is possible there is some Z that causes both or that X causes some intermediary event that causes Y — it could even be a coincidence. Be sure to check out Tyler Vigen's [Spurious Correlations blog](https://www.tylervigen.com/spurious-correlations) for some interesting correlations.
#### Pitfalls of summary statistics
Not only can our correlation coefficients be misleading, but so can summary statistics. Anscombe's quartet is a collection of four different datasets that have identical summary statistics and correlation coefficients; however, when plotted, it is obvious they are not similar:
```
ax = stats_viz.anscombes_quartet()
```
Another example of this is the [Datasaurus Dozen](https://www.autodeskresearch.com/publications/samestats):
```
ax = stats_viz.datasaurus_dozen()
```
### Prediction and forecasting
Say our favorite ice cream shop has asked us to help predict how many ice creams they can expect to sell on a given day. They are convinced that the temperature outside has a strong influence on their sales, so they collected data on the number of ice creams sold at a given temperature. We agree to help them, and the first thing we do is make a scatter plot of the data they gave us:
```
ax = stats_viz.example_scatter_plot()
```
We can observe an upward trend in the scatter plot: more ice creams are sold at higher temperatures. In order to help out the ice cream shop, though, we need to find a way to make predictions from this data. We can use a technique called **regression** to model the relationship between temperature and ice cream sales with an equation:
```
ax = stats_viz.example_regression()
```
We can use the resulting equation to make predictions for the number of ice creams sold at various temperatures. However, we must keep in mind if we are interpolating or extrapolating. If the temperature value we are using for prediction is within the range of the original data we used to build our regression model, then we are **interpolating** (solid portion of the red line). On the other hand, if the temperature is beyond the values in the original data, we are **extrapolating**, which is very dangerous, since we can't assume the pattern continues indefinitely in each direction (dotted portion of the line). Extremely hot temperatures may cause people to stay inside, meaning no ice creams will be sold, while the equation indicates record-high sales.
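As a rough sketch of the idea (the numbers below are invented, not the data behind the plots above), a simple linear regression can be fit with `np.polyfit()`:
```
import numpy as np

temps = np.array([60, 65, 70, 75, 80, 85, 90])  # hypothetical temperatures (°F)
sales = np.array([10, 15, 22, 26, 33, 40, 43])  # hypothetical ice creams sold

slope, intercept = np.polyfit(temps, sales, deg=1)  # fit a line: sales ≈ slope * temp + intercept

print(slope * 72 + intercept)   # interpolating: 72°F is inside the observed range
print(slope * 110 + intercept)  # extrapolating: 110°F is outside it, so be skeptical
```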
Forecasting is a type of prediction for time series. In a process called **time series decomposition**, a time series is decomposed into a trend component, a seasonality component, and a cyclical component. These components can be combined in an additive or multiplicative fashion:
```
ax = stats_viz.time_series_decomposition_example()
```
The **trend** component describes the behavior of the time series in the long-term without accounting for the seasonal or cyclical effects. Using the trend, we can make broad statements about the time series in the long-run, such as: *the population of Earth is increasing* or *the value of a stock is stagnating*. **Seasonality** of a time series explains the systematic and calendar-related movements of a time series. For example, the number of ice cream trucks on the streets of New York City is high in the summer and drops to nothing in the winter; this pattern repeats every year regardless of whether the actual amount each summer is the same. Lastly, the **cyclical** component accounts for anything else unexplained or irregular with the time series; this could be something like a hurricane driving the number of ice cream trucks down in the short-term because it isn't safe to be outside. This component is difficult to anticipate with a forecast due to its unexpected nature.
When making models to forecast time series, some common methods include ARIMA-family methods and exponential smoothing. **ARIMA** stands for autoregressive (AR), integrated (I), moving average (MA). Autoregressive models take advantage of the fact that an observation at time $t$ is correlated to a previous observation, for example at time $t - 1$. Note that not all time series are autoregressive. The integrated component concerns the differenced data, or the change in the data from one time to another. Lastly, the moving average component uses a sliding window to average the last $x$ observations where $x$ is the length of the sliding window. We will build an ARIMA model in chapter 7.
The moving average puts equal weight on each time period in the past involved in the calculation. In practice, this isn't always a realistic expectation of our data. Sometimes all past values are important, but they vary in their influence on future data points. For these cases, we can use exponential smoothing, which allows us to put more weight on more recent values and less weight on values further away from what we are predicting.
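A bare-bones sketch of simple exponential smoothing makes the weighting explicit; higher values of `alpha` put more weight on recent observations. The series and smoothing factor below are arbitrary:
```
def exponential_smoothing(series, alpha):
    """Each smoothed value mixes the newest observation with the previous smoothed value."""
    smoothed = [series[0]]
    for value in series[1:]:
        smoothed.append(alpha * value + (1 - alpha) * smoothed[-1])
    return smoothed

print(exponential_smoothing([3, 5, 4, 6, 8, 7], alpha=0.5))  # hypothetical series
```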
### Inferential Statistics
Inferential statistics deals with inferring or deducing things from the sample data we have in order to make statements about the population as a whole. Before doing so, we need to know whether we conducted an observational study or an experiment. An observational study can't be used to determine causation because we can't control for everything. An experiment, on the other hand, is controlled.
Remember that the sample statistics we discussed earlier are estimators for the population parameters. Our estimators need **confidence intervals**, which provide a point estimate and a margin of error around it. This is the range that the true population parameter will be in at a certain **confidence level**. At the 95% confidence level, 95% of the confidence intervals calculated from random samples of the population contain the true population parameter.
We also have the option of using **hypothesis testing**. First, we define a null hypothesis (say the true population mean is 0), then we determine a **significance level** (1 - confidence level), which is the probability of rejecting the null hypothesis when it is true. Our result is statistically significant if the value for the null hypothesis is outside the confidence interval. [More info](https://statisticsbyjim.com/hypothesis-testing/hypothesis-tests-confidence-intervals-levels/).
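For example, a 95% confidence interval for a population mean can be sketched with SciPy using the t-distribution (the sample here is randomly generated, purely for illustration):
```
import numpy as np
from scipy import stats

sample = np.random.default_rng(0).normal(loc=5, scale=2, size=30)  # hypothetical sample

point_estimate = sample.mean()
std_error = stats.sem(sample)                                # s / sqrt(n)
margin = stats.t.ppf(0.975, df=len(sample) - 1) * std_error  # margin of error
print(point_estimate - margin, point_estimate + margin)      # 95% confidence interval
```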
<hr>
<div style="overflow: hidden; margin-bottom: 10px;">
<div style="float: left;">
<a href="./checking_your_setup.ipynb">
<button>Check your setup</button>
</a>
<a href="./python_101.ipynb">
<button>Python 101</button>
</a>
</div>
<div style="float: right;">
<a href="./exercises.ipynb">
<button>Exercises</button>
</a>
<a href="../ch_02/1-pandas_data_structures.ipynb">
<button>Chapter 2 →</button>
</a>
</div>
</div>
<hr>
# Example: CanvasXpress ridgeline Chart No. 3
This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/ridgeline-3.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="ridgeline3",
data={
"z": {
"Species": [
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"setosa",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"versicolor",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica",
"virginica"
]
},
"y": {
"vars": [
"s1",
"s2",
"s3",
"s4",
"s5",
"s6",
"s7",
"s8",
"s9",
"s10",
"s11",
"s12",
"s13",
"s14",
"s15",
"s16",
"s17",
"s18",
"s19",
"s20",
"s21",
"s22",
"s23",
"s24",
"s25",
"s26",
"s27",
"s28",
"s29",
"s30",
"s31",
"s32",
"s33",
"s34",
"s35",
"s36",
"s37",
"s38",
"s39",
"s40",
"s41",
"s42",
"s43",
"s44",
"s45",
"s46",
"s47",
"s48",
"s49",
"s50",
"s51",
"s52",
"s53",
"s54",
"s55",
"s56",
"s57",
"s58",
"s59",
"s60",
"s61",
"s62",
"s63",
"s64",
"s65",
"s66",
"s67",
"s68",
"s69",
"s70",
"s71",
"s72",
"s73",
"s74",
"s75",
"s76",
"s77",
"s78",
"s79",
"s80",
"s81",
"s82",
"s83",
"s84",
"s85",
"s86",
"s87",
"s88",
"s89",
"s90",
"s91",
"s92",
"s93",
"s94",
"s95",
"s96",
"s97",
"s98",
"s99",
"s100",
"s101",
"s102",
"s103",
"s104",
"s105",
"s106",
"s107",
"s108",
"s109",
"s110",
"s111",
"s112",
"s113",
"s114",
"s115",
"s116",
"s117",
"s118",
"s119",
"s120",
"s121",
"s122",
"s123",
"s124",
"s125",
"s126",
"s127",
"s128",
"s129",
"s130",
"s131",
"s132",
"s133",
"s134",
"s135",
"s136",
"s137",
"s138",
"s139",
"s140",
"s141",
"s142",
"s143",
"s144",
"s145",
"s146",
"s147",
"s148",
"s149",
"s150"
],
"smps": [
"Sepal.Length"
],
"data": [
[
5.1
],
[
4.9
],
[
4.7
],
[
4.6
],
[
5
],
[
5.4
],
[
4.6
],
[
5
],
[
4.4
],
[
4.9
],
[
5.4
],
[
4.8
],
[
4.8
],
[
4.3
],
[
5.8
],
[
5.7
],
[
5.4
],
[
5.1
],
[
5.7
],
[
5.1
],
[
5.4
],
[
5.1
],
[
4.6
],
[
5.1
],
[
4.8
],
[
5
],
[
5
],
[
5.2
],
[
5.2
],
[
4.7
],
[
4.8
],
[
5.4
],
[
5.2
],
[
5.5
],
[
4.9
],
[
5
],
[
5.5
],
[
4.9
],
[
4.4
],
[
5.1
],
[
5
],
[
4.5
],
[
4.4
],
[
5
],
[
5.1
],
[
4.8
],
[
5.1
],
[
4.6
],
[
5.3
],
[
5
],
[
7
],
[
6.4
],
[
6.9
],
[
5.5
],
[
6.5
],
[
5.7
],
[
6.3
],
[
4.9
],
[
6.6
],
[
5.2
],
[
5
],
[
5.9
],
[
6
],
[
6.1
],
[
5.6
],
[
6.7
],
[
5.6
],
[
5.8
],
[
6.2
],
[
5.6
],
[
5.9
],
[
6.1
],
[
6.3
],
[
6.1
],
[
6.4
],
[
6.6
],
[
6.8
],
[
6.7
],
[
6
],
[
5.7
],
[
5.5
],
[
5.5
],
[
5.8
],
[
6
],
[
5.4
],
[
6
],
[
6.7
],
[
6.3
],
[
5.6
],
[
5.5
],
[
5.5
],
[
6.1
],
[
5.8
],
[
5
],
[
5.6
],
[
5.7
],
[
5.7
],
[
6.2
],
[
5.1
],
[
5.7
],
[
6.3
],
[
5.8
],
[
7.1
],
[
6.3
],
[
6.5
],
[
7.6
],
[
4.9
],
[
7.3
],
[
6.7
],
[
7.2
],
[
6.5
],
[
6.4
],
[
6.8
],
[
5.7
],
[
5.8
],
[
6.4
],
[
6.5
],
[
7.7
],
[
7.7
],
[
6
],
[
6.9
],
[
5.6
],
[
7.7
],
[
6.3
],
[
6.7
],
[
7.2
],
[
6.2
],
[
6.1
],
[
6.4
],
[
7.2
],
[
7.4
],
[
7.9
],
[
6.4
],
[
6.3
],
[
6.1
],
[
7.7
],
[
6.3
],
[
6.4
],
[
6
],
[
6.9
],
[
6.7
],
[
6.9
],
[
5.8
],
[
6.8
],
[
6.7
],
[
6.7
],
[
6.3
],
[
6.5
],
[
6.2
],
[
5.9
]
]
}
},
config={
"colorBy": "Species",
"graphType": "Scatter2D",
"hideHistogram": True,
"histogramBins": 20,
"ridgeBy": "Species",
"ridgelineScale": 2.5,
"showFilledHistogramDensity": True,
"showHistogramDataPoints": True,
"showHistogramDensity": True
},
width=613,
height=613,
events=CXEvents(),
after_render=[
[
"createHistogram",
[
"Species",
None,
None
]
]
],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="ridgeline_3.html")
```
# Simulating Language, Lab 9, Gene-culture co-evolution
We're going to use the same code as the last lab to do something similar to Smith & Kirby (2008) and discover what types of prior and learning strategy combinations are evolutionarily stable. You may be surprised to find that we really don't need much more than the code we already have to do this!
## Code from Lab 8
Here's the code from Lab 8, with no changes.
```
import random
%matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf')
from math import log, log1p, exp
from scipy.special import logsumexp
from numpy import mean # This is a handy function that calculates the average of a list
```
### Parameters for language
```
variables = 2 # The number of different variables in the language
variants = 2 # The number of different variants each variable can take
```
### Log probability functions
```
def log_subtract(x,y):
return x + log1p(-exp(y - x))
def normalize_logprobs(logprobs):
logtotal = logsumexp(logprobs) #calculates the summed log probabilities
normedlogs = []
for logp in logprobs:
normedlogs.append(logp - logtotal) #normalise - subtracting in the log domain
#equivalent to dividing in the normal domain
return normedlogs
def log_roulette_wheel(normedlogs):
r = log(random.random()) #generate a random number in [0,1), then convert to log
accumulator = normedlogs[0]
for i in range(len(normedlogs)):
if r < accumulator:
return i
accumulator = logsumexp([accumulator, normedlogs[i + 1]])
def wta(probs):
maxprob = max(probs) # Find the maximum probability (works if these are logs or not)
candidates = []
for i in range(len(probs)):
if probs[i] == maxprob:
candidates.append(i) # Make a list of all the indices with that maximum probability
return random.choice(candidates)
```
### Production of data
```
def produce(language, log_error_probability):
variable = random.randrange(len(language)) # Pick a variant to produce
correct_variant = language[variable]
if log(random.random()) > log_error_probability:
return variable, correct_variant # Return the variable, variant pair
else:
possible_error_variants = list(range(variants))
possible_error_variants.remove(correct_variant)
error_variant = random.choice(possible_error_variants)
return variable, error_variant
```
### Function to check if language is regular
```
def regular(language):
first_variant = language[0]
for variant in language:
if variant != first_variant:
return False # The language can only be regular if every variant is the same as the first
return True
```
### Prior
```
def logprior(language, log_bias):
if regular(language):
number_of_regular_languages = variants
return log_bias - log(number_of_regular_languages) #subtracting logs = dividing
else:
number_of_irregular_languages = variants ** variables - variants # the double star here means raise to the power
# e.g. 4 ** 2 is four squared
return log_subtract(0, log_bias) - log(number_of_irregular_languages)
# log(1) is 0, so log_subtract(0, bias) is equivalent to (1 - bias) in the
# non-log domain
```
### Likelihood
```
def loglikelihood(data, language, log_error_probability):
loglikelihoods = []
logp_correct = log_subtract(0, log_error_probability) #probability of producing correct form
logp_incorrect = log_error_probability - log(variants - 1) #logprob of each incorrect variant
for utterance in data:
variable = utterance[0]
variant = utterance[1]
if variant == language[variable]:
loglikelihoods.append(logp_correct)
else:
loglikelihoods.append(logp_incorrect)
return sum(loglikelihoods) #summing log likelihoods = multiplying likelihoods
```
### Learning
```
def all_languages(variables, variants):
if variables == 0:
return [[]] # The list of all languages with zero variables is just one language, and that's empty
else:
result = [] # If we are looking for a list of languages with more than zero variables,
# then we'll need to build a list
smaller_langs = all_languages(variables - 1, variants) # Let's first find all the languages with one
# fewer variables
for language in smaller_langs: # For each of these smaller languages, we're going to have to create a more
# complex language by adding each of the possible variants
for variant in range(variants):
result.append(language + [variant])
return result
def learn(data, log_bias, log_error_probability, learning_type):
list_of_all_languages = all_languages(variables, variants) # uses the parameters we set above
list_of_posteriors = []
for language in list_of_all_languages:
this_language_posterior = loglikelihood(data, language, log_error_probability) + logprior(language, log_bias)
list_of_posteriors.append(this_language_posterior)
if learning_type == 'map':
map_language_index = wta(list_of_posteriors) # For MAP learning, we pick the best language
map_language = list_of_all_languages[map_language_index]
return map_language
if learning_type == 'sample':
normalized_posteriors = normalize_logprobs(list_of_posteriors)
sampled_language_index = log_roulette_wheel(normalized_posteriors) # For sampling, we use the roulette wheel
sampled_language = list_of_all_languages[sampled_language_index]
return sampled_language
```
### Iterated learning
```
def iterate(generations, bottleneck, log_bias, log_error_probability, learning_type):
language = random.choice(all_languages(variables, variants))
if regular(language):
accumulator = [1]
else:
accumulator = [0]
language_accumulator = [language]
for generation in range(generations):
data = []
for i in range(bottleneck):
data.append(produce(language, log_error_probability))
language = learn(data, log_bias, log_error_probability, learning_type)
if regular(language):
accumulator.append(1)
else:
accumulator.append(0)
language_accumulator.append(language)
return accumulator, language_accumulator
```
## New code
Imagine we have a population of individuals who share a cognitive bias and a learning strategy (i.e., sampling or map) that they are born with. In other words, it is encoded in their genes. These individuals transmit their linguistic behaviour culturally through iterated learning, eventually leading to a particular distribution over languages emerging. We can find that distribution for a particular combination of prior bias and learning strategy by running a long iterated learning chain, just like we were doing in the last lab.
Now, imagine that there is some genetic mutation in this population and we have an individual who has a different prior and/or learning strategy. We can ask the question: will this mutation have an evolutionary advantage? In other words, will it spread through the population, or will it die out?
To answer this question, we first need to think about what it means to have a survival advantage. One obvious answer is that you might have a survival advantage if you are able to learn the language of the population well. Presumably, if you learn the language of the population poorly, you won't be able to communicate as well and will be at a disadvantage.
The function `learning_success` allows us to estimate how well a particular type of learner will do when attempting to learn any one of a set of languages we input. The function takes the usual parameters you might expect: the bottleneck, the bias, the error probability, and the type of learner (`sample` or `map`). However, it also takes a list of different languages, and a number of test trials. Each test trial involves:
1. picking at random one of the languages in the list,
2. producing a number of utterances from that language (using the `bottleneck` parameter)
3. learning a new language from that list of utterances
4. checking whether the new language is identical to the one we originally picked (in which case we count this as a learning success)
At the end it gives us the proportion of trials which were successful.
```
def learning_success(bottleneck, log_bias, log_error_probability, learning_type, languages, trials):
success = 0
for i in range(trials):
input_language = random.choice(languages)
data = []
for i in range(bottleneck):
data.append(produce(input_language, log_error_probability))
output_language = learn(data, log_bias, log_error_probability, learning_type)
if output_language == input_language:
success = success + 1
return success / trials
```
We can use this function in combination with the iterate function to see how well a particular type of learner will learn languages that emerge from cultural evolution. For example, try the following:
```
languages = iterate(100000, 5, log(0.6), log(0.05), 'map')[1]
print(learning_success(5, log(0.6), log(0.05), 'map', languages, 100000))
```
This will run an iterated learning simulation for 100,000 generations with a MAP learner and a bias of 0.6. Then it will test how well the same kind of learner learns the languages that emerge from that simulation. To get an accurate result, it runs the learning test for 100,000 trials. These two numbers (the generations and the test trials) don't need to be the same, but should ideally be quite large so that we can get accurate estimates. You can try running them with lower numbers a bunch of times and see how variable the results are to get a rough and ready idea of how accurate the samples are.
```
languages = iterate(100000, 5, log(0.6), log(0.05), 'map')[1]
print(learning_success(5, log(0.6), log(0.05), 'map', languages, 100000))
```
OK, but how does this help us tell what kind of biases and learning strategies will evolve? As I discussed above, we want to see if a mutation will have an advantage (and therefore is likely to spread through a population) or not. So, really, we want to know how well a learner will do at learning, who *isn't* the same as the one that created the languages. Try this:
```
print(learning_success(5, log(0.6), log(0.05), 'sample', languages, 100000))
```
The original list of languages was created by a population of MAP learners. Now we're testing what the expected success of a learner with a sampling strategy would be if exposed to one of these languages. If this number is higher than the number we got above, then the mutation could spread through the population. If this number is lower than the number we got above, we can expect it to die out. You may find that these numbers are quite similar (which is why we need large numbers of learning trials and generations to get an accurate estimate). This suggests that in some cases the selection pressure on the evolution of these genes might not be enormous, but small differences in fitness can nevertheless lead to big changes over time.
```
print(learning_success(5, log(0.6), log(0.05), 'sample', languages, 100000))
```
## Question
There's only one question for this lab, because I want you to think about how best you can explore it with the tools I've given you here!
You could answer this question just by typing in a bunch of commands like the examples above, or you could try to come up with a way of looping through different combinations. If you want, you could try to come up with a measure quantifying how big an advantage (or disadvantage) a mutation has in a particular population. If you want to be really fancy, you could then visualise these results in a graph somehow (hint: you can use `plt.imshow` to visualise a 2-dimensional list of numbers).
1. Which mutations will spread in different populations of learners, which mutations will die out, and which are selectively neutral (i.e. are neither better nor worse)?
*My approach to this is going to be to try three different prior biases, from very weak to very strong, plus the two types of learner (sample vs. map). So first up, for each of these combinations we'll run a long simulation to gather the set of languages that would emerge in a population with that learning strategy/bias combination. Just to keep things neat, let's write a function to do that.*
```
def generate_stationary_distributions(bias_learning_type_pairs):
stationary_distributions = []
for bias, learning_type in bias_learning_type_pairs:
print(bias, learning_type)
languages = iterate(100000, 5, log(bias), log(0.05), learning_type)[1]
stationary_distributions.append(languages)
return stationary_distributions
```
*This function I've just defined takes a list of bias, learning type pairs and runs a long simulation for each of them. You can think of a combination of a learning bias and a learning type (i.e. hypothesis selection strategy) as characterising a learner - it's what we assume is innate, and therefore provided by evolution. Let's choose a range of biases in favour of regularity from relatively weak (near 0.5) to relatively strong (near 1.0) and run these for both sample and map. This list below gives these different possible learners.*
```
learners = [(0.6, 'sample'), (0.7, 'sample'), (0.8, 'sample'),
(0.6, 'map'), (0.7, 'map'), (0.8, 'map')]
```
*Now we use this list and the function I defined to generate a list of stationary distributions (i.e. a list of languages) for each of these. **Strictly speaking, these aren't exactly the stationary distributions** since it should take some time for the culturally evolving system to settle into the stationary distribution. In other words, it'll take some time for the influence of the first language to be "washed out". However, since we're running for 100,000 generations, we can probably ignore this. (But maybe it would be better to change this to look only at the second half of the run?). For some values of bias (very high or very low), you may need to run longer simulations (both here and when evaluating learning in the next step) before you get accurate values, so please do bear that in mind!*
```
stationary_distributions = generate_stationary_distributions(learners)
```
*Now we need to test each of our six learners on each of these six distributions. This corresponds to how well a "mutant" learner will fare in a majority learner's culture. Here's a function to do this, which will give the result as a table (actually a list of lists). Each row of the table will correspond to the mutant learner, and each column will be the stationary distribution (i.e. the majority learner).*
```
def table_of_success(bias_learning_type_pairs, stationary_distributions):
table = []
for bias, learning_type in bias_learning_type_pairs:
print(bias, learning_type)
table_row = []
for languages in stationary_distributions:
success = learning_success(5, log(bias), log(0.05), learning_type, languages, 100000)
table_row.append(success)
table.append(table_row)
return table
results = table_of_success(learners, stationary_distributions)
```
*Let's look at those results... we'll start by just printing the table out, then trying to print it a bit more neatly!*
```
print(results)
for row in results:
for cell in row:
print(cell, end='\t') # this prints with a tab instead of a new line
print('\n') # this prints a newline
```
*Let's try and visualise these a bit better. Here's my first attempt, with `plt.imshow`*
```
plt.imshow(results)
```
*If I get a graph that looks useful, I then go to the matplotlib website and try and figure out how to make it more useful... This was a bit fiddly, but here's what I came up with after reading that website and googling around a bit :-)*
```
fig, ax = plt.subplots(1, 1)
fig = ax.imshow(results, extent=[0,6,6,0], cmap='coolwarm')
labels = ['.6 S', '.7 S', '.8 S', '.6 M', '.7 M', '.8 M']
ax.set_xticks([.5,1.5,2.5,3.5,4.5,5.5])
ax.set_xticklabels(labels)
ax.set_yticks([.5,1.5,2.5,3.5,4.5,5.5])
ax.set_yticklabels(labels)
ax.set_ylabel("Mutant")
ax.set_xlabel("Majority")
plt.colorbar(fig)
```
*So, it looks like there are general differences in strategy, with MAP learners learning better than samplers. But really, what we want to know is not the overall learning success, but whether a mutant learner is better than the majority learner in the population into which it is born. If it is better, then it has a chance of taking over the population. To figure this out we need to know how well the learner will do if born into a population of other learners who are the same, and then compare a mutant to this. If you think about it, this is the diagonal of the table above (i.e. when the mutant *is* the learner that created the stationary distribution). We can extract this as follows:*
```
self_learning = []
for i in range(6):
self_learning.append(results[i][i])
print(self_learning)
```
*Now we can compare each cell in the table and see if the learning success for the mutant is higher than the non-mutant, lower or the same.*
```
for minority in range(6):
for majority in range(6):
if results[minority][majority] > self_learning[majority]:
print(learners[minority], end=' ')
print('invades a population of', end=' ')
print(learners[majority])
elif results[minority][majority] < self_learning[majority]:
print(learners[minority], end=' ')
print('dies out in a population of', end=' ')
print(learners[majority])
```
*So, it looks like MAP learners invade populations of samplers often, but never the other way around. Also, it looks like samplers that don't match the specific bias of the population die out, whereas that's not so clearly the case with MAP. However, there's a problem with this way of looking at things. This doesn't show us how big an advantage one type of learner has over another, and because these are simulation runs, the results are going to be quite variable and we might have a tiny difference showing up just by chance. Because of this, let's instead plot the results but using a ratio of mutant success to majority success. This will give us an estimate of the **selective advantage** the mutant has. We'll make a new table and ratios and plot this.*
```
new_results = []
for minority in range(6):
new_row = []
for majority in range(6):
new_row.append(results[minority][majority] / self_learning[majority])
new_results.append(new_row)
fig, ax = plt.subplots(1, 1)
fig = ax.imshow(new_results, extent=[0,6,6,0], cmap='coolwarm')
labels = ['.6 S', '.7 S', '.8 S', '.6 M', '.7 M', '.8 M']
ax.set_xticks([.5,1.5,2.5,3.5,4.5,5.5])
ax.set_xticklabels(labels)
ax.set_yticks([.5,1.5,2.5,3.5,4.5,5.5])
ax.set_yticklabels(labels)
ax.set_ylabel("Mutant")
ax.set_xlabel("Majority")
plt.colorbar(fig)
```
*So, from this plot, we can see clearly that MAP learners will invade sampling populations, but not vice versa. Also, there isn't a clear difference between different bias strengths. At least for MAP learners, the strong biases are no more likely to invade the weaker biases. This means that if there is some cost to maintaining a strong bias, or if mutations are likely to degrade rather than strengthen strong biases, we can expect to end up with MAP learners with weak biases. The important point is that weakly biased MAP learners have the same stationary distribution as strongly biased MAP learners (as we saw last week). That means that even for cases where we see strong universal patterns in language, we should not expect that to be supported by strong innate constraints. These kinds of constraints are not likely to evolve even if selection is favouring learners who can learn the language of the population they're born into!*
```
from google.colab import drive
drive.mount('/content/gdrive')
import os
# For sending GET requests from the API
import requests
# For saving access tokens and for file management when creating and adding to the dataset
import os
# For dealing with json responses we receive from the API
import json
# For displaying the data after
import pandas as pd
# For saving the response data in CSV format
import csv
# For parsing the dates received from twitter in readable formats
import datetime
import dateutil.parser
import unicodedata
#To add wait time between requests
import time
import datetime
import glob
os.environ['TOKEN'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
cd gdrive/My Drive/TFM/
```
# Classes and method definitions
```
def auth():
return os.getenv('TOKEN')
def create_headers(bearer_token):
headers = {"Authorization": "Bearer {}".format(bearer_token)}
return headers
def create_url(keyword, start_date, end_date, max_results = 10):
search_url = "https://api.twitter.com/2/tweets/counts/all" #Change to the endpoint you want to collect data from
#change params based on the endpoint you are using
query_params = {'query': keyword,
'start_time': start_date,
'end_time': end_date,
'granularity':'hour',
'next_token': {}}
return (search_url, query_params)
def connect_to_endpoint(url, headers, params, next_token = None):
params['next_token'] = next_token #params object received from create_url function
response = requests.request("GET", url, headers = headers, params = params)
print("Endpoint Response Code: " + str(response.status_code))
if response.status_code != 200:
raise Exception(response.status_code, response.text)
return response.json()
def append_to_csv(json_response, fileName):
#A counter variable
counter = 0
#Open OR create the target CSV file
csvFile = open(fileName, "a", newline="", encoding='utf-8')
csvWriter = csv.writer(csvFile)
#Loop through each tweet
for tweet in json_response['data']:
# We will create a variable for each since some of the keys might not exist for some tweets
# So we will account for that
# 1. Author ID
count = tweet['tweet_count']
# 2. Time created
end = dateutil.parser.parse(tweet['end'])
res = [count,end]
# Append the result to the CSV file
csvWriter.writerow(res)
counter += 1
# When done, close the CSV file
csvFile.close()
# Print the number of tweets for this iteration
print("# of Tweets added from this response: ", counter)
```
# API retrieval
```
#Inputs for the request
bearer_token = auth()
headers = create_headers(bearer_token)
keyword = "ETH"
start_time = "2021-02-11T16:00:00.000Z"
end_time = "2021-08-29T00:00:00.000Z"
url = create_url(keyword, start_time,end_time)
json_response = connect_to_endpoint(url[0], headers, url[1])
resp = pd.DataFrame.from_dict(json_response['data'])
flag = True
while(flag):
if 'next_token' in json_response['meta']:
print("-------------------")
next_token = json_response['meta']['next_token']
print("Token: ", next_token)
url = create_url(keyword, start_time,end_time)
json_response = connect_to_endpoint(url[0], headers, url[1], next_token)
aux1 = pd.DataFrame.from_dict(json_response['data'])
resp = aux1.append(resp)
time.sleep(5)
else:
break
from datetime import datetime
resp['start'] = resp.start.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%dT%H:%M:%S.%f"))
resp['end'] = resp.end.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%dT%H:%M:%S.%f"))
resp
# raw trade data from https://public.bitmex.com/?prefix=data/trade/
Dollar_bars = pd.DataFrame()
for i,file in enumerate(glob.glob("data/*.csv")):
if i == 0:
Dollar_bars = Dollar_bars.append(pd.read_csv(file))
print('Percentge of files already Loaded:',round((i/len(glob.glob("data/*.csv")))*100,1), '%. There are', len(glob.glob("data/*.csv"))-i, "files left", end='')
else:
Dollar_bars = Dollar_bars.append(pd.read_csv(file))
print('\r Percentge of files already Loaded:',round((i/len(glob.glob("data/*.csv")))*100,1), '%. There are', len(glob.glob("data/*.csv"))-i, "files left",end='', flush=True)
Dollar_bars
import numpy as np
Dollar_bars.index = Dollar_bars['timestamp']
Dollar_bars['timestamp'] = Dollar_bars.timestamp.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%d %H:%M:%S.%f"))
import numpy as np
Dollar_bars['tweet_count'] = np.nan
for index1, row1 in Dollar_bars.iterrows():
count = 0
for index, row in resp.iterrows():
#lagging the signal 1h for selection bias
if (row1['timestamp']+pd.Timedelta('1h') > row['start'] and row1['timestamp']+pd.Timedelta('1h') < row['end']):
count = row['tweet_count']
#Dollar_bars.set_value(index1,'tweet_count',count)
Dollar_bars.at[index1,'tweet_count'] = count
print('\r Timestamp',row1['timestamp'], ' is in between:',row['start'],' and:',row['end'] ,end='',flush=False)
#print('And the number of tweets for that period is: ',count)
Dollar_bars.to_csv('Dollar_bars_tweet_counts_1_final.csv')
!cp Dollar_bars_tweet_counts_1_final.csv "gdrive/My Drive/TFM/Dollar_bars_tweet_counts_1_final.csv"
```
# Second API retrieval
```
#Inputs for the request
bearer_token = auth()
headers = create_headers(bearer_token)
keyword = "Ethereum"
start_time = "2021-02-13T16:00:00.000Z"
end_time = "2021-08-29T00:00:00.000Z"
url = create_url(keyword, start_time,end_time)
json_response = connect_to_endpoint(url[0], headers, url[1])
resp = pd.DataFrame.from_dict(json_response['data'])
flag = True
while(flag):
if 'next_token' in json_response['meta']:
print("-------------------")
next_token = json_response['meta']['next_token']
print("Token: ", next_token)
url = create_url(keyword, start_time,end_time)
json_response = connect_to_endpoint(url[0], headers, url[1], next_token)
aux1 = pd.DataFrame.from_dict(json_response['data'])
resp = aux1.append(resp)
time.sleep(5)
else:
break
from datetime import datetime
resp['start'] = resp.start.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%dT%H:%M:%S.%f"))
resp['end'] = resp.end.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%dT%H:%M:%S.%f"))
resp
# raw trade data from https://public.bitmex.com/?prefix=data/trade/
Dollar_bars = pd.DataFrame()
for i,file in enumerate(glob.glob("data/*.csv")):
if i == 0:
Dollar_bars = Dollar_bars.append(pd.read_csv(file))
print('Percentge of files already Loaded:',round((i/len(glob.glob("data/*.csv")))*100,1), '%. There are', len(glob.glob("data/*.csv"))-i, "files left", end='')
else:
Dollar_bars = Dollar_bars.append(pd.read_csv(file))
print('\r Percentge of files already Loaded:',round((i/len(glob.glob("data/*.csv")))*100,1), '%. There are', len(glob.glob("data/*.csv"))-i, "files left",end='', flush=True)
Dollar_bars.drop(columns=['timestamp.1'], inplace=True)
Dollar_bars.index = Dollar_bars['timestamp']
Dollar_bars['timestamp'] = Dollar_bars.timestamp.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%d %H:%M:%S.%f"))
Dollar_bars
import numpy as np
Dollar_bars['tweet_count2'] = np.nan
for index1, row1 in Dollar_bars.iterrows():
count = 0
for index, row in resp.iterrows():
if (row1['timestamp']+pd.Timedelta('1h') > row['start'] and row1['timestamp']+pd.Timedelta('1h') < row['end']):
count = row['tweet_count']
#Dollar_bars.set_value(index1,'tweet_count',count)
Dollar_bars.at[index1,'tweet_count2'] = count
print('\r Timestamp',row1['timestamp'], ' is in between:',row['start'],' and:',row['end'] ,end='',flush=False)
#print('And the number of tweets for that period is: ',count)
Dollar_bars
Dollar_bars.to_csv('Dollar_bars_tweet_counts_2_final.csv')
!cp Dollar_bars_tweet_counts_2_final.csv "gdrive/My Drive/TFM/Dollar_bars_tweet_counts_2_final.csv"
!lscpu |grep 'Model name'
!free -h --si | awk '/Mem:/{print $2}'
```
# Convolutional Layer
In this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer.
In this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights.
<img src='notebook_ims/conv_layer.gif' height=60% width=60% />
### Import the image
```
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
```
### Define and visualize the filters
```
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
```
## Define a convolutional layer
The various layers that make up any neural network are documented [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a:
* Convolutional layer
Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!
#### `__init__` and `forward`
To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the forward behavior of a network that applies those initialized layers to an input (`x`) in the function `forward`. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a NumPy array.
Below, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
```
### Visualize the output of each filter
First, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
```
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
```
Let's look at the output of a convolutional layer, before and after a ReLU activation function is applied.
```
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
```
#### ReLU activation
In this model, we've used an activation function that scales the output of the convolutional layer. We've chosen a ReLU function to do this, which simply turns all negative pixel values into 0 (black). See the equation pictured below for input pixel values, `x`.
<img src='notebook_ims/relu_ex.png' height=50% width=50% />
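Written out as an equation, the ReLU pictured above is simply

$$
f(x) = \max(0, x),
$$

that is, negative inputs are set to zero and non-negative inputs pass through unchanged.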
```
# after a ReLU is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
```
# Projection plots for CERN HL YR
David Straub, 2018
```
import flavio
from wilson import Wilson
flavio.__version__
```
NP scenarios
```
w1 = Wilson({'C9_bsmumu': -1}, scale=4.8, eft='WET', basis='flavio')
w2 = Wilson({'C9_bsmumu': -1, 'C9p_bsmumu': 1}, scale=4.8, eft='WET', basis='flavio')
```
Observables
```
obs = [
('<Rmue>(B+->Kll)', 1, 6),
('<Rmue>(B0->K*ll)', 1.1, 6),
('<Rmue>(Bs->phill)', 1.1, 6),
]
obs_belle = [
('<Rmue>(B+->Kll)', 1, 6),
('<Rmue>(B0->K*ll)', 1.1, 6),
]
```
Central NP predictions
```
for o in obs:
print(o, flavio.np_prediction(o[0], w1, *o[1:]), flavio.np_prediction(o[0], w2, *o[1:]))
```
Load measurements (projections)
```
proj = flavio.measurements.read_file('./yr_projections.yaml')
from flavio.statistics.likelihood import FastLikelihood
import flavio.plots as fpl
```
Compute SM uncertainties (even if negligible)
```
%%time
smcov = flavio.sm_covariance(obs_list=obs, N=100, threads=4)
```
Initialize likelihoods
```
%%time
llh = {}
for meas in proj:
try:
observables = obs
llh[meas] = FastLikelihood(meas, observables=observables, include_measurements=[meas])
except AssertionError:
observables = obs_belle
llh[meas] = FastLikelihood(meas, observables=observables, include_measurements=[meas])
smcov_dict = dict(covariance=smcov, observables=observables)
llh[meas].sm_covariance.load_dict(smcov_dict)
llh[meas].make_measurement()
par = flavio.default_parameters.get_central_all()
```
Compute plot data
```
%%time
pdat = {}
for meas in proj:
def log_likelihood(x):
C9, C9p = x
w = Wilson({'C9_bsmumu': C9, 'C9p_bsmumu': C9p}, scale=4.8, eft='WET', basis='flavio')
return llh[meas].log_likelihood(par, w)
pdat[meas] = fpl.likelihood_contour_data(log_likelihood, -2, 1, -1.5, 1.5, steps=40, threads=4, n_sigma=(1, 2, 3, 4, 5))
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
Hack to change number of sigma contours
```
def makesigma(pdat, levels):
_pdat = pdat.copy()
_pdat['levels'] = [0] + [flavio.statistics.functions.delta_chi2(n, dof=2) for n in levels]
return _pdat
```
Save data file for later
```
import pickle
with open('pdat_YR_C9_C9p.p', 'wb') as f:
pickle.dump(pdat, f)
```
Plot
```
plt.figure(figsize=(6, 6))
fpl.contour(**makesigma(pdat['YR projection Current LHCb'], (3,)), label=r'LHCb present $3\sigma$', col=0, filled=False, contour_args=dict(linestyles=':'))
fpl.contour(**makesigma(pdat['YR projection NP LHCb 2025'], (1,2,3,4,5)), label='NP LHCb 2025', col=1, filled=False, interpolation_factor=10, contour_args=dict(linestyles='--'))
fpl.contour(**makesigma(pdat['YR projection NP Belle-II'], (1,2,3,4,5)), label='NP Belle-II', col=2, filled=False, interpolation_factor=10)
fpl.contour(**makesigma(pdat['YR projection NP LHCb Upgrade II'], (1,2,3,4,5)), label='NP LHCb Upgrade II', col=1, interpolation_factor=10, filled=False)
fpl.contour(**makesigma(pdat['YR projection SM LHCb Upgrade II'], (1,2,3,4,5)), label='SM LHCb Upgrade II', col=3, interpolation_factor=10, filled=False)
plt.xlabel(r'$C_9^{bs\mu\mu}$')
plt.ylabel(r'$C_9^{\prime\,bs\mu\mu}$')
plt.scatter([0], [0], marker='*', label='SM', c='k')
plt.scatter([-1], [0], marker='.', label='NP $C_9$', c='k')
plt.scatter([-1], [1], marker='x', label=r'NP $C_9^{(\prime)}$', c='k')
plt.xlim([-1.8, 0.8])
plt.ylim([-1.3, 1.3])
plt.legend(loc='lower right');
fpl.flavio_branding(version=True)
plt.savefig('YR_C9_C9p.pdf', bbox_inches='tight')
```
```
# The libraries and functions you are going to use go here
%matplotlib inline
import matplotlib.pyplot as plt
import sympy as sym
from sympy import oo
sym.init_printing()
## You may need to add more libraries or define new functions. Do so below
```
## Assignment
Names: **Write your full names here, separated by commas**
Suppose you have a continuous-time system that is excited by an input $x(t)$ and responds with a signal $y(t)$, as shown in the figure:

Analyze the system model in each of the following cases:
| Case | Equation |
|------|----------------------------------------------------------------------------------------|
| A | \begin{equation} \frac{dy}{dt} + 5y(t) = 5x(t) \end{equation} |
| B | \begin{equation} \frac{dy}{dt} - 5y(t) = 5x(t) \end{equation} |
| C | \begin{equation} \frac{d^{2}y}{dt^{2}} + 5\frac{dy}{dt} + y(t) = x(t) \end{equation} |
| D | \begin{equation} \frac{d^{2}y}{dt^{2}} + y(t) = x(t) \end{equation} |
### Analysis
- Take the differential equation to the frequency domain using the Laplace transform.
\begin{equation}
WriteTheTransformedEquationHere
\end{equation}
- Find the transfer function of the system.
\begin{equation}
F_A(s) = WriteTheTransferFunctionHere
\end{equation}
- Plot the pole-zero map
- Plot the step response (a minimal sympy sketch for Case A is given after the next code cell)
```
## The code to generate the requested plot goes here. Run the code to generate the plot.
```
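To get you started, here is a minimal sympy sketch for Case A only (an illustration under our own naming and plotting choices, not the full solution; adapt it to the other cases):
```
# Minimal sketch for Case A: dy/dt + 5 y(t) = 5 x(t)  ->  (s + 5) Y(s) = 5 X(s)
import sympy as sym
import matplotlib.pyplot as plt

s = sym.symbols('s')
t = sym.symbols('t', positive=True)

F_A = 5 / (s + 5)                        # transfer function F_A(s) = Y(s)/X(s)

poles = sym.solve(sym.denom(F_A), s)     # [-5]
zeros = sym.solve(sym.numer(F_A), s)     # []  (no finite zeros)
print('Poles:', poles, ' Zeros:', zeros)

# Pole-zero map
plt.axhline(0, color='gray', lw=0.5)
plt.axvline(0, color='gray', lw=0.5)
plt.scatter([complex(p).real for p in poles], [complex(p).imag for p in poles],
            marker='x', color='red', label='poles')
plt.xlabel('Re(s)'); plt.ylabel('Im(s)'); plt.legend(); plt.show()

# Step response: y(t) = L^{-1}{ F_A(s) * 1/s } = 1 - exp(-5 t)
y_step = sym.inverse_laplace_transform(F_A / s, s, t)
sym.plot(y_step, (t, 0, 2), ylabel='y(t)', title='Step response, Case A')
```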
- Analyze the plots obtained, write your analysis, and determine the stability of the system and the type of damping.
    - Write your discussion here.
    - You may use bullet points or paragraphs.
    - Keep the indentation to make the text easier to read.
```
## Any extra code you may need to answer the questions goes here.
```
Suppose that systems $B$ and $C$ interact in such a way that the output of $B$ is the input of $C$.
- What is the equivalent transfer function of these connected systems?
- Plot the pole-zero map
- Plot the step response (a sketch of the series combination is given after the next code cell)
```
## The code to generate the requested plot goes here. Run the code to generate the plot.
```
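A minimal sketch for the series connection, under the standard assumption that cascading LTI systems multiplies their transfer functions:
```
# Sketch: series connection of B and C -> product of their transfer functions
import sympy as sym

s = sym.symbols('s')
F_B = 5 / (s - 5)                  # Case B: dy/dt - 5 y(t) = 5 x(t)
F_C = 1 / (s**2 + 5*s + 1)         # Case C: y'' + 5 y' + y(t) = x(t)
F_BC = sym.simplify(F_B * F_C)     # equivalent transfer function of B followed by C

print(F_BC)
print('Poles:', sym.solve(sym.denom(F_BC), s))   # the pole at s = +5 comes from system B
```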
- Analyze the plot obtained, write your analysis, and determine the stability of the system and the type of damping.
    - Write your discussion here.
    - You may use bullet points or paragraphs.
    - Keep the indentation to make the text easier to read.
## Comparative analysis
Based on the previous results, carry out a comparative analysis and write down your observations
- Write your answers here
- You may use bullet points
## Conclusions
Write your conclusions
- Use bullet points
[View in Colaboratory](https://colab.research.google.com/github/corykendrick/fastai_in_colab/blob/master/Using_Google_Colab_for_Fastai.ipynb)
# Using Google Colab for Fast.ai
Welcome! Here is my one-stop-shop for getting all the Fast.ai lessons to work on Google Colab. I'll be updating this as I work through new lessons. Let me know if you have suggestions or improvements at @corythesaurus (DM me on Twitter).
My general workflow is to open each Fast.ai notebook and make a copy of it to save in my Drive, so I can add in my own cells as needed (and save them for later!). You can do that from within Colab: *File > Open Notebook... > click on "Github" tab > search for "fastai"*. All the notebooks should be there. Once you open a notebook, you can make a copy of it: *File > Save a copy in Drive...*.
Finally, make sure you've enabled the GPU! *Edit > Notebook settings > set "Hardware Accelerator" to GPU.*
## Installing dependencies ##
We need to manually install fastai and pytorch. And maybe other things that fastai depends on (see [here](https://github.com/fastai/fastai/blob/master/requirements.txt)).
I will be referring to [this fastai forum thread](http://forums.fast.ai/t/colaboratory-and-fastai/10122/6) and [this blogpost](https://towardsdatascience.com/fast-ai-lesson-1-on-google-colab-free-gpu-d2af89f53604) if I get stuck. This is also a handy resource for using pytorch in colab: https://jovianlin.io/pytorch-with-gpu-in-google-colab/ (and his [example notebook](https://colab.research.google.com/drive/1jxUPzMsAkBboHMQtGyfv5M5c7hU8Ss2c#scrollTo=ed-8FUn2GqQ4)!). And this [post](https://medium.com/@chsafouane/getting-started-with-pytorch-on-google-colab-811c59a656b6).
```
# Check python version
import sys
sys.version
# Install fastai
!pip3 install fastai
# Install PyTorch
# I haven't needed to do this, but here's how just in case.
!pip3 install http://download.pytorch.org/whl/cu80/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl
!pip3 install torchvision
```
### Special additions for particular lessons
```
# Lesson 4
!pip3 install spacy
!python -m spacy download en
```
## Import all the libraries ##
```
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
```
## GPU setup ##
Google is very generous and gives access to a GPU for CoLab users. Make sure it's enabled: Edit > Notebook settings > set "Hardware accelerator" to GPU.
The following is just to assuage your fears that you're being rate-limited or otherwise; you don't need to add these cells to your notebooks to get them to run. Just make sure you've enabled the GPU in the notebook settings. This is easy to forget :)
### Check that the GPU is available
```
torch.cuda.is_available()
torch.backends.cudnn.enabled
```
### Check how much of the GPU is available
I'm using the following code from [a stackoverflow thread](https://stackoverflow.com/questions/48750199/google-colaboratory-misleading-information-about-its-gpu-only-5-ram-available) to check what % of the GPU is being utilized right now. 100% is bad; 0% is good (all free for me to use!).
```
# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]
def printm():
process = psutil.Process(os.getpid())
print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " | Proc size: " + humanize.naturalsize( process.memory_info().rss))
print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
printm()
```
## Cloning the fastai git repo ##
You likely don't actually need to do this, but if you want direct access to the .xls files, or want to inspect or fork their code... clone the fastai repository!
```
!git clone https://github.com/fastai/courses.git
!pwd
!ls courses
!ls courses/deeplearning1
!ls courses/deeplearning1/excel
```
## Accessing the fastai data files (lessons 1, 3, 4) ##
If you get a fastai URL to a .zip or .tgz - follow these directions to import the data into your notebook.
Here's the snippet from Lesson 1: *The dataset is available at http://files.fast.ai/data/dogscats.zip. You can download it directly on your server by running the following line in your terminal. wget http://files.fast.ai/data/dogscats.zip. You should put the data in a subdirectory of this notebook's directory, called data/. Note that this data is already available in Crestle and the Paperspace fast.ai template.*
### If it's a .zip file (lesson 1):
#### Lesson 1: Dogs & Cats data
```
# Get the file from fast.ai URL, unzip it, and put it into the folder 'data'
# Warning: I haven't figured out how to make the unzipping less verbose.
!wget http://files.fast.ai/data/dogscats.zip && unzip dogscats.zip -d data/
# Check to make sure the data is where you think it is:
!ls
# Check to make sure the folders all unzipped properly:
!ls data/dogscats
```
### If it's a .tgz file (lesson 3 & 4):
#### Lesson 3: Rossmann data
```
# Get the Rossmann data from the fast.ai URL, and make a nested directory to put it in later.
# -p flag from mkdir is to make a parent directory (allows nested directories to be created at once)
!wget http://files.fast.ai/part2/lesson14/rossmann.tgz && mkdir -p ~/data/rossmann
# Unzip the .tgz file
# x for extract
# -v for verbose # NOTE: I usually turn this off; it prints a lot...
# -z for gnuzip
# -f for file (should come at last just before file name)
# -C to extract the zipped contents to a different directory
!tar -xzf rossmann.tgz -C ~/data/rossmann/
# Remove the .tgz file
!rm rossmann.tgz
# Make sure the data's where we think it is:
!ls ~/data/rossmann
```
#### Lesson 4: IMDB data
```
# Get the IMDB data from the fastai URL:
!wget http://files.fast.ai/data/aclImdb.tgz
# Make sure it imported properly:
!ls
# Unzip the tgz file
# x for extract
# -v for verbose # NOTE: I usually turn this off; it prints a lot...
# -z for gnuzip
# -f for file (should come at last just before file name)
# -C to extract the zipped contents to a different directory
!tar -xvzf aclImdb.tgz -C data/
# Remove the original .tgz file
!rm aclImdb.tgz
# Make sure the data is where we think it is:
!ls data/aclImdb
```
## Getting data from Kaggle, using the Kaggle CLI (lesson 2)
Install the Kaggle API; authenticate; and then use the Kaggle command line interface to access data.
```
# Install the Kaggle API
!pip3 install kaggle
# Import kaggle.json from Google Drive
# This snippet will output a link which needs authentication from any Google account
from googleapiclient.discovery import build
import io, os
from googleapiclient.http import MediaIoBaseDownload
from google.colab import auth
auth.authenticate_user()
drive_service = build('drive', 'v3')
results = drive_service.files().list(
q="name = 'kaggle.json'", fields="files(id)").execute()
kaggle_api_key = results.get('files', [])
filename = "/content/.kaggle/kaggle.json"
os.makedirs(os.path.dirname(filename), exist_ok=True)
request = drive_service.files().get_media(fileId=kaggle_api_key[0]['id'])
fh = io.FileIO(filename, 'wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
os.chmod(filename, 0o600)  # octal file mode: owner read/write only
```
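If you'd rather not go through the Drive API, a simpler alternative (a sketch; it assumes you've downloaded `kaggle.json` from your Kaggle account page to your local machine) is to upload the file manually each session with Colab's upload widget and write it to the same `/content/.kaggle/` path used above:
```
# Alternative: upload kaggle.json by hand each session (sketch)
from google.colab import files
import os

uploaded = files.upload()                       # pick kaggle.json in the file dialog
os.makedirs('/content/.kaggle', exist_ok=True)
with open('/content/.kaggle/kaggle.json', 'wb') as f:
    f.write(uploaded['kaggle.json'])
os.chmod('/content/.kaggle/kaggle.json', 0o600)  # owner read/write only
```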
**Now we have the Kaggle API set up!**
Here are a few examples of what we can do now, using the Kaggle API:
```
!kaggle competitions list
!kaggle datasets download -d stanfordu/street-view-house-numbers -w -f street-view-house-numbers.zip
```
More documentation on the Kaggle API here: https://github.com/Kaggle/kaggle-api
**Typical workflow:**
Download the zip file of a dataset:
```
!kaggle datasets download -d <owner/dataset-name>
```
And then unzip the file and move to a directory:
```
!unzip street-view-house-numbers.zip
```
Check to make sure it's there:
```
!ls
```
*This post was helpful for this lesson 2 data in particular: http://forums.fast.ai/t/how-to-download-data-for-lesson-2-from-kaggle-for-planet-competition/7684/38*
```
# List the files for the Planet data
!kaggle competitions files -c planet-understanding-the-amazon-from-space
# -c: competition name
# -f: which file you want to download
# -p: path to where the file should be saved
!kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p ~/data/planet/
!kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p ~/data/planet/
!kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv.zip -p ~/data/planet/
# Make sure the data is where you think it is:
!ls ~/data/planet
# In order to unzip the 7z files, need to install p7zip
# This was helpful: http://forums.fast.ai/t/unzipping-tar-7z-files-in-google-collab-notebook/14857/4
!apt-get install p7zip-full
# Unzip the 7zip files
# -d: which file to un7zip
!p7zip -d ~/data/planet/test-jpg.tar.7z
!p7zip -d ~/data/planet/train-jpg.tar.7z
# Unzip the .tar files
!tar -xvf ~/data/planet/test-jpg.tar
!tar -xvf ~/data/planet/train-jpg.tar
# Move the unzipped folders into data/planet/
!mv test-jpg ~/data/planet/ && mv train-jpg ~/data/planet/
# Unzip the regular file
!unzip ~/data/planet/train_v2.csv.zip -d ~/data/planet/
# Make sure everything looks as it should:
!ls ~/data/planet/
```
## Now we're ready to go! ##
# Training pixel classifiers from folders of images
When training pixel classifiers it often makes sense to train using multiple images. For example, when images look different, especially between conditions, it is necessary to train classifiers with multiple images from all conditions.
In this notebook we demonstrate how to train [APOC](https://github.com/haesleinhuepf/apoc) classifiers using two folders containing pairs of training data.
```
import apoc
import os
from skimage.io import imread
import pyclesperanto_prototype as cle
import matplotlib.pyplot as plt
```
## Data preparation
We start by selecting two input folders: one with images and one with sparsely annotated label images. The names of the files in the two folders must be pairwise identical. We will also have a quick look into these folders.
For demonstration purposes, we reuse data of the [BBBC007 dataset](https://bbbc.broadinstitute.org/BBBC007) version 1 (Jones et al., Proc. ICCV Workshop on Computer Vision for Biomedical Image Applications, 2005), available from the Broad Bioimage Benchmark Collection [Ljosa et al., Nature Methods, 2012](http://dx.doi.org/10.1038/nmeth.2083).
```
image_folder = "../../data/BBBC007/images/"
masks_folder = "../../data/BBBC007/masks/"
file_list = os.listdir(image_folder)
# show all images
fig, axs = plt.subplots(1, 4, figsize=(15,15))
for i, filename in enumerate(file_list):
image = imread(image_folder + filename)
cle.imshow(image, plot=axs[i])
plt.show()
# show corresponding label images
fig, axs = plt.subplots(1, 4, figsize=(15,15))
for i, filename in enumerate(file_list):
masks = imread(masks_folder + filename)
cle.imshow(masks, plot=axs[i])
plt.show()
```
## Training
If the folders are set up properly, we can pass them to the training.
```
# set up the classifier and where it should be saved
segmenter = apoc.ObjectSegmenter(opencl_filename="test.cl")
# setup feature set used for training
features = apoc.PredefinedFeatureSet.object_size_1_to_5_px.value
# train classifier on folders
apoc.train_classifier_from_image_folders(
segmenter,
features,
image = image_folder,
ground_truth = masks_folder)
```
## Prediction
After the training, we can apply the classifier to all images in the image folder. The following line reloads the classifier from disk. In that way we can ensure that it was stored correctly.
```
segmenter = apoc.ObjectSegmenter(opencl_filename="test.cl")
# show all images
for i, filename in enumerate(file_list):
fig, axs = plt.subplots(1, 2, figsize=(15,15))
image = imread(image_folder + filename)
cle.imshow(image, plot=axs[0])
labels = segmenter.predict(image)
cle.imshow(labels, plot=axs[1], labels=True)
plt.show()
```
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import glob
import h5py
import os
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
# sklearn functions
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, KFold, GroupShuffleSplit
# load functions from nitorch
sys.path.insert(0,"/analysis/ritter/AD/Budding_Spectral_Analysis/code/nitorch/")
from nitorch.data import load_nifti
from nitorch.transforms import ToTensor, SagittalTranslate, SagittalFlip, \
AxialTranslate, normalization_factors, Normalize, \
IntensityRescale
from nitorch.callbacks import EarlyStopping, ModelCheckpoint
from nitorch.trainer import Trainer
from nitorch.initialization import weights_init
from nitorch.metrics import balanced_accuracy, sensitivity, specificity, auc_score
from nitorch.utils import count_parameters
from nitorch.inference import predict
torch.__version__
gpu = 4
b = 4
num_classes = 2
dtype = np.float64
# Inference on different splits of the holdout set
# holdout_h5 = h5py.File("/analysis/ritter/projects/AD/Budding_Spectral_Analysis/data/ADNI_mci_all.h5", 'r')
train_h5 = h5py.File("/analysis/ritter/projects/AD/Budding_Spectral_Analysis/data/ADNI_training_no_outliers.h5", 'r')
val_h5 = h5py.File("/analysis/ritter/projects/AD/Budding_Spectral_Analysis/data/ADNI_validation_no_outliers.h5", 'r')
holdout_h5 = h5py.File("/analysis/ritter/projects/AD/Budding_Spectral_Analysis/data/ADNI_holdout_no_outliers.h5", 'r')
X_train, y_train = train_h5['X'], train_h5['y']
X_val, y_val = val_h5['X'], val_h5['y']
X_holdout, y_holdout = holdout_h5['X'], holdout_h5['y']
mean_std_normalization = False
min_max_normalization = True
# normalize min-max
X_train = np.array(X_train)
X_val = np.array(X_val)
X_holdout = np.array(X_holdout)
y_train = np.array(y_train)
y_val = np.array(y_val)
y_holdout = np.array(y_holdout)
if mean_std_normalization:
mean = np.mean(X_train)
std = np.std(X_train)
X_train = (X_train - mean) / std
X_val = (X_val - mean) / std
X_holdout = (X_holdout - mean) / std
if min_max_normalization:
for i in range(len(X_train)):
X_train[i] -= np.min(X_train[i])
X_train[i] /= np.max(X_train[i])
for i in range(len(X_val)):
X_val[i] -= np.min(X_val[i])
X_val[i] /= np.max(X_val[i])
for i in range(len(X_holdout)):
X_holdout[i] -= np.min(X_holdout[i])
X_holdout[i] /= np.max(X_holdout[i])
class ADNIDataset(Dataset):
def __init__(self, X, y, transform=None, target_transform=None, mask=None, z_factor=None, dtype=np.float32, num_classes=2):
self.X = np.copy(X)
self.y = np.copy(y)
self.X = X
self.y = y
self.transform = transform
self.target_transform = target_transform
self.mask = mask
self.z_factor = z_factor
self.dtype = dtype
self.num_classes = num_classes
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
image = self.X[idx]
label_tensor = np.zeros(shape=(self.num_classes,))
label = self.y[idx] >= 0.5
label = torch.LongTensor([label])
if self.transform:
image = self.transform(image)
sample = {"image" : image,
"label" : label}
return sample
augmentations = [SagittalFlip(), SagittalTranslate(dist=(-2, 3))]
adni_data_train = ADNIDataset(X_train, y_train, transform=transforms.Compose(augmentations + [ToTensor()]), dtype=dtype)
adni_data_val = ADNIDataset(X_val, y_val, transform=transforms.Compose([ToTensor()]), dtype=dtype)
adni_data_test = ADNIDataset(X_holdout, y_holdout, transform=transforms.Compose([ToTensor()]), dtype=dtype)
sample = adni_data_test[50]
img = sample["image"]
print(img.shape)
plt.imshow(img[0][:,:,70], cmap='gray')
```
# Define the classifier
```
class ClassificationModel3D(nn.Module):
"""The model we use in the paper."""
def __init__(self, dropout=0.4, dropout2=0.4):
nn.Module.__init__(self)
self.Conv_1 = nn.Conv3d(1, 8, 3, bias = False)
self.Conv_1_bn = nn.BatchNorm3d(8)
self.Conv_1_mp = nn.MaxPool3d(2)
self.Conv_2 = nn.Conv3d(8, 16, 3, bias = False)
self.Conv_2_bn = nn.BatchNorm3d(16)
self.Conv_2_mp = nn.MaxPool3d(3)
self.Conv_3 = nn.Conv3d(16, 32, 3, bias = False)
self.Conv_3_bn = nn.BatchNorm3d(32)
self.Conv_3_mp = nn.MaxPool3d(2)
self.Conv_4 = nn.Conv3d(32, 64, 3, bias = False)
self.Conv_4_bn = nn.BatchNorm3d(64)
self.Conv_4_mp = nn.MaxPool3d(3)
self.dense_1 = nn.Linear(2304, 128)
self.dense_2 = nn.Linear(128, 2)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout2)
def forward(self, x):
x = self.relu(self.Conv_1_bn(self.Conv_1(x)))
x = self.Conv_1_mp(x)
x = self.relu(self.Conv_2_bn(self.Conv_2(x)))
x = self.Conv_2_mp(x)
x = self.relu(self.Conv_3_bn(self.Conv_3(x)))
x = self.Conv_3_mp(x)
x = self.relu(self.Conv_4_bn(self.Conv_4(x)))
x = self.Conv_4_mp(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.relu(self.dense_1(x))
x = self.dropout2(x)
x = self.dense_2(x)
return x
net = ClassificationModel3D().cuda(gpu)
print("Trainable model parameters: {}".format(count_parameters(net)))
```
# Training
```
def run(
net,
data,
shape,
callbacks=[],
augmentations=[],
masked=False,
metrics=[],
k_folds=None,
b=4,
num_epochs=35,
retain_metric=None
):
fold_metric = []
models = []
fold = 0
initial_prepend = None
for trial in range(4):
print("Starting trial {}".format(trial))
# add current fold number to model checkpoint path
if callbacks is not None:
for idx, callback in enumerate(callbacks):
if isinstance(callback, ModelCheckpoint):
if initial_prepend is None:
initial_prepend = callbacks[idx].prepend
callbacks[idx].prepend = initial_prepend + "cv_fold_{}_".format(fold)
fold += 1
# restart model
del net
net = ClassificationModel3D().cuda(gpu)
# reset hyperparameters
lr = 1e-4
wd = 1e-4
criterion = nn.CrossEntropyLoss().cuda(gpu)
optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=wd)
train_loader = DataLoader(
adni_data_train, batch_size=b, num_workers=4, shuffle=True
)
val_loader = DataLoader(
adni_data_val, batch_size=1, num_workers=1, shuffle=True
)
sample = next(iter(train_loader))
img = sample["image"][0]
lbl = sample["label"][0]
plt.imshow(img.squeeze()[:,:,70], cmap='gray')
plt.title(lbl.item())
plt.show()
trainer = Trainer(
net,
criterion,
optimizer,
metrics=metrics,
callbacks=callbacks,
device=gpu,
prediction_type="classification"
)
# train model and store results
net, report = trainer.train_model(
train_loader,
val_loader,
num_epochs=num_epochs,
show_train_steps=60,
show_validation_epochs=1,
)
# append validation score of the retain metric
if isinstance(retain_metric, str):
fold_metric.append(report["val_metrics"][retain_metric][-1])
else:
fold_metric.append(report["val_metrics"][retain_metric.__name__][-1])
models.append(net)
print("Finished fold.")
# visualize result
trainer.visualize_training(report, metrics)
trainer.evaluate_model(val_loader, gpu)
print("################################")
print("################################")
print("All accuracies: {}".format(fold_metric))
return fold_metric, models
num_epochs = 200
min_iters = 3
ignore_epochs = 15
normalize = False
retain_metric = accuracy_score
metrics = [accuracy_score]
r = 0
model_path = "/analysis/ritter/projects/AD/Budding_Spectral_Analysis/models/model_final"
check = ModelCheckpoint(path=model_path,
prepend="repeat_{}".format(r),
store_best=True,
ignore_before=ignore_epochs,
retain_metric=retain_metric)
callbacks = [check, EarlyStopping(patience=8, ignore_before=ignore_epochs, retain_metric="loss", mode='min')]
fold_metric, models = run(net=net, data=adni_data_train,
k_folds=-1,
callbacks=callbacks,
shape=-1,
masked=False,
metrics=metrics,
num_epochs=num_epochs,
retain_metric=retain_metric,
b=b,
)
print(np.mean(fold_metric))
print(np.std(fold_metric))
```
# Start inference
```
model_path = "/analysis/ritter/projects/AD/Budding_Spectral_Analysis/models/model_final"
# load models
models = []
for i in range(4):
model_dir = os.path.join(model_path, "repeat_0_cv_fold_{}_BEST_ITERATION.h5".format(i))
net = ClassificationModel3D()
net.load_state_dict(torch.load(model_dir))
models.append(net)
test_loader = DataLoader(
adni_data_test, batch_size=1, num_workers=1, shuffle=False
)
metrics = []
lr = 1e-5
wd = 1e-3
criterion = nn.BCEWithLogitsLoss().cuda(gpu)
optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=wd)
for fold, model in enumerate(models):
print("Fold {}".format(fold))
all_preds = []
all_labels = []
net = model.cuda(gpu)
net.eval()
with torch.no_grad():
for sample in test_loader:
img = sample["image"]
label = sample["label"]
img = img.to(torch.device("cuda:" + str(gpu)))
output = net.forward(img)
pred = torch.argmax(F.softmax(output, dim=1))
all_preds.append(pred.cpu().numpy().item())
all_labels.append(label.numpy().item())
balanced_acc = balanced_accuracy(all_labels, all_preds)
sens = sensitivity(all_labels, all_preds)
spec = specificity(all_labels, all_preds)
auc = auc_score(all_labels, all_preds)
print(balanced_acc)
'''trainer = Trainer(
net,
criterion,
optimizer,
scheduler=None,
metrics=metrics,
callbacks=None,
device=gpu,
prediction_type="binary"
)
computed_metrics = trainer.evaluate_model(test_loader, metrics=[balanced_accuracy])'''
net.train()
metrics.append((balanced_acc, sens, spec, auc))
print("######## Final results ########")
metrics_df = pd.DataFrame(metrics)
print(metrics_df)
print("Balanced accuracy mean {:.2f} %".format(np.mean(metrics_df[0])*100))
quit()
```
## Representing Data and Engineering Features
### Categorical Variables
### One-Hot-Encoding (Dummy Variables)
```
!pip install mglearn
import mglearn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
# The file has no headers naming the columns, so we pass header=None
# and provide the column names explicitly in "names"
adult_path = os.path.join(mglearn.datasets.DATA_PATH, "adult.data")
data = pd.read_csv(
adult_path, header=None, index_col=False,
names=['age', 'workclass', 'fnlwgt', 'education', 'education-num',
'marital-status', 'occupation', 'relationship', 'race', 'gender',
'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',
'income'])
# For illustration purposes, we only select some of the columns
data = data[['age', 'workclass', 'education', 'gender', 'hours-per-week',
'occupation', 'income']]
# IPython.display allows nice output formatting within the Jupyter notebook
display(data.head())
print("Original features:\n", list(data.columns), "\n")
data_dummies = pd.get_dummies(data)
print("Features after get_dummies:\n", list(data_dummies.columns))
display(data_dummies.head())
features = data_dummies.loc[:, 'age':'occupation_ Transport-moving']
# Extract NumPy arrays
X = features.values
y = data_dummies['income_ >50K'].values
print("X.shape: {} y.shape: {}".format(X.shape, y.shape))
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Test score: {:.2f}".format(logreg.score(X_test, y_test)))
```
# Binning (Discretization)
- In this part, the data used to plot the input and output stays the same; only the representation of the data fed to the linear model changes.
- It shows that, even though we use a linear model, we can obtain nonlinear-looking predictions. As discussed in class, a decision tree learns by repeatedly splitting the original data on its own, so binning the input makes little difference to its performance.
- This example shows that when the relationship is nonlinear but the data is very large and high-dimensional, so that a linear model must be used, binning can improve the model's performance.
```
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
X, y = mglearn.datasets.make_wave(n_samples=120)
line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)
reg = DecisionTreeRegressor(min_samples_leaf=3).fit(X, y)
plt.plot(line, reg.predict(line), label="decision tree")
reg = LinearRegression().fit(X, y)
plt.plot(line, reg.predict(line), label="linear regression")
plt.plot(X[:, 0], y, 'o', c='k')
plt.ylabel("Regression output")
plt.xlabel("Input feature")
plt.legend(loc="best")
from sklearn.preprocessing import KBinsDiscretizer
kb = KBinsDiscretizer(n_bins=10, strategy='uniform')
kb.fit(X)
print("bin edges: \n", kb.bin_edges_)
```
### Use `transform` to encode each data point into one of the 10 bins.
```
X_binned = kb.transform(X)
X_binned
print(X[:10])
X_binned.toarray()[:10]
```
- On the binned data, the decision tree and the linear regression model produce the same predictions.
- Compared with the earlier plot, the decision tree has actually become more rigid, while the linear regression model has become more flexible. Binning benefits linear models, but it does not help every model.
```
kb = KBinsDiscretizer(n_bins=10, strategy='uniform', encode='onehot-dense')
kb.fit(X)
X_binned = kb.transform(X)
line_binned = kb.transform(line)
reg = LinearRegression().fit(X_binned, y)
plt.plot(line, reg.predict(line_binned), label='linear regression binned')
reg = DecisionTreeRegressor(min_samples_split=3).fit(X_binned, y)
plt.plot(line, reg.predict(line_binned), label='decision tree binned')
plt.plot(X[:, 0], y, 'o', c='k')
plt.vlines(kb.bin_edges_[0], -3, 3, linewidth=1, alpha=.2)
plt.legend(loc="best")
plt.ylabel("Regression output")
plt.xlabel("Input feature")
```
## Interactions and Polynomials
- In the plot above the model only learns an intercept per bin, so no slope appears within each bin.
- The next step lets the model learn a slope for each bin as well. Explaining this precisely requires some regression theory, but the key point is that by building per-bin regressions we can obtain an effect similar to that of a more complex model; apply this technique when it is needed.
```
X_combined = np.hstack([X, X_binned]) # add original input X
print(X_combined.shape)
reg = LinearRegression().fit(X_combined, y)
line_combined = np.hstack([line, line_binned])
plt.plot(line, reg.predict(line_combined), label='linear regression combined')
plt.vlines(kb.bin_edges_[0], -3, 3, linewidth=1, alpha=.2)
plt.legend(loc="best")
plt.ylabel("Regression output")
plt.xlabel("Input feature")
plt.plot(X[:, 0], y, 'o', c='k')
```
## Adding Input Features so Each Bin Learns Its Own Slope
```
X_product = np.hstack([X_binned, X * X_binned]) # add interaction term
print(X_product.shape)
```
- We can see that both the intercept and the slope are now learned separately for each bin.
```
reg = LinearRegression().fit(X_product, y)
line_product = np.hstack([line_binned, line * line_binned])
plt.plot(line, reg.predict(line_product), label='linear regression product')
plt.vlines(kb.bin_edges_[0], -3, 3, linewidth=1, alpha=.2)
plt.plot(X[:, 0], y, 'o', c='k')
plt.ylabel("Regression output")
plt.xlabel("Input feature")
plt.legend(loc="best")
```
## Adding Polynomial Terms to Obtain a Nonlinear (Polynomial) Regression Model
```
from sklearn.preprocessing import PolynomialFeatures
# include polynomials up to x ** 10:
# the default "include_bias=True" adds a feature that's constantly 1
poly = PolynomialFeatures(degree=10, include_bias=False)
poly.fit(X)
X_poly = poly.transform(X)
print("X_poly.shape: {}".format(X_poly.shape))
print("Entries of X:\n{}".format(X[:5]))
print("Entries of X_poly:\n{}".format(X_poly[:5]))
print("Polynomial feature names:\n{}".format(poly.get_feature_names()))
reg = LinearRegression().fit(X_poly, y)
line_poly = poly.transform(line)
plt.plot(line, reg.predict(line_poly), label='polynomial linear regression')
plt.plot(X[:, 0], y, 'o', c='k')
plt.ylabel("Regression output")
plt.xlabel("Input feature")
plt.legend(loc="best")
```
## Training a More Complex Model (SVM) on the Original Data and Comparing with the Regression Results Above
- The SVM produces predictions with a complexity similar to that of the polynomial regression.
```
from sklearn.svm import SVR
for gamma in [1, 10]:
svr = SVR(gamma=gamma).fit(X, y)
plt.plot(line, svr.predict(line), label='SVR gamma={}'.format(gamma))
plt.plot(X[:, 0], y, 'o', c='k')
plt.ylabel("Regression output")
plt.xlabel("Input feature")
plt.legend(loc="best")
```
## Measuring the Performance Gain from Adding Interaction and Polynomial Features on the Boston Housing Data
```
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
boston = load_boston()
X_train, X_test, y_train, y_test = train_test_split(
boston.data, boston.target, random_state=0)
# rescale data
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
poly = PolynomialFeatures(degree=2).fit(X_train_scaled)
X_train_poly = poly.transform(X_train_scaled)
X_test_poly = poly.transform(X_test_scaled)
print("X_train.shape: {}".format(X_train.shape))
print("X_train_poly.shape: {}".format(X_train_poly.shape))
print("Polynomial feature names:\n{}".format(poly.get_feature_names()))
```
- Ridge improves with the added features, whereas RandomForest actually gets slightly worse.
- RandomForest is an ensemble, a model that combines many learners, so its performance is usually very good even without feature engineering.
- RandomForest on the original features and Ridge with interactions and polynomial features perform similarly. The example shows that adding interactions and polynomials to a simple model can bring it close to the performance of a complex model.
```
from sklearn.linear_model import Ridge
ridge = Ridge().fit(X_train_scaled, y_train)
print("Score without interactions: {:.3f}".format(
ridge.score(X_test_scaled, y_test)))
ridge = Ridge().fit(X_train_poly, y_train)
print("Score with interactions: {:.3f}".format(
ridge.score(X_test_poly, y_test)))
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100).fit(X_train_scaled, y_train)
print("Score without interactions: {:.3f}".format(
rf.score(X_test_scaled, y_test)))
rf = RandomForestRegressor(n_estimators=100).fit(X_train_poly, y_train)
print("Score with interactions: {:.3f}".format(rf.score(X_test_poly, y_test)))
```
## Univariate Nonlinear Transformations
- Most models perform best when the features are roughly normally distributed.
- When a feature is far from normal, we transform it to reduce the skew before applying machine learning.
```
rnd = np.random.RandomState(0)
X_org = rnd.normal(size=(1000, 3))
w = rnd.normal(size=3)
X = rnd.poisson(10 * np.exp(X_org))
y = np.dot(X_org, w)
print("Number of feature appearances:\n{}".format(np.bincount(X[:, 0])))
bins = np.bincount(X[:, 0])
plt.bar(range(len(bins)), bins, color='grey')
plt.ylabel("Number of appearances")
plt.xlabel("Value")
```
- Performance of Ridge when trained on the heavily skewed features shown above:
```
from sklearn.linear_model import Ridge
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
score = Ridge().fit(X_train, y_train).score(X_test, y_test)
print("Test score: {:.3f}".format(score))
X_train_log = np.log(X_train + 1)
X_test_log = np.log(X_test + 1)
plt.hist(X_train_log[:, 0], bins=25, color='gray')
plt.ylabel("Number of appearances")
plt.xlabel("Value")
```
- After removing the skew with a log transform, Ridge performs much better:
```
score = Ridge().fit(X_train_log, y_train).score(X_test_log, y_test)
print("Test score: {:.3f}".format(score))
```
# Automatic Feature Selection
### Univariate statistics
```
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectPercentile
from sklearn.model_selection import train_test_split
cancer = load_breast_cancer()
# get deterministic random numbers
rng = np.random.RandomState(42)
noise = rng.normal(size=(len(cancer.data), 50))
# add noise features to the data
# the first 30 features are from the dataset, the next 50 are noise
X_w_noise = np.hstack([cancer.data, noise])
X_train, X_test, y_train, y_test = train_test_split(
X_w_noise, cancer.target, random_state=0, test_size=.5)
# use f_classif (the default) and SelectPercentile to select 50% of features
select = SelectPercentile(percentile=50)
select.fit(X_train, y_train)
# transform training set
X_train_selected = select.transform(X_train)
print("X_train.shape: {}".format(X_train.shape))
print("X_train_selected.shape: {}".format(X_train_selected.shape))
```
- After selecting 40 of the 80 features and running logistic regression, performance improves compared with using all features.
- Feature selection does not always help; in some cases performance does not improve.
```
from sklearn.linear_model import LogisticRegression
# transform test data
X_test_selected = select.transform(X_test)
lr = LogisticRegression(max_iter=1000)
lr.fit(X_train, y_train)
print("Score with all features: {:.3f}".format(lr.score(X_test, y_test)))
lr.fit(X_train_selected, y_train)
print("Score with only selected features: {:.3f}".format(
lr.score(X_test_selected, y_test)))
```
### Model-based Feature Selection
- A much more powerful approach than the univariate analysis above, using a more complex model.
- A supervised model that provides feature importances (here a RandomForest) is used to select all of the important features at once.
- The RandomForest-based selection gives better performance than the univariate feature selection above.
```
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
select = SelectFromModel(
RandomForestClassifier(n_estimators=100, random_state=42),
threshold="median")
select.fit(X_train, y_train)
X_train_l1 = select.transform(X_train)
print("X_train.shape: {}".format(X_train.shape))
print("X_train_l1.shape: {}".format(X_train_l1.shape))
X_test_l1 = select.transform(X_test)
score = LogisticRegression(max_iter=1000).fit(X_train_l1, y_train).score(X_test_l1, y_test)
print("Test score: {:.3f}".format(score))
```
### Iterative feature selection
- Because the model is retrained repeatedly while features are removed, this takes longer than model-based selection.
- Logistic regression again improves when trained on the selected features.
- The RandomForest itself also scores 0.951, the same as logistic regression on the selected features. In other words, with good feature selection a linear model can be made to rival the performance of a RandomForest.
```
from sklearn.feature_selection import RFE
select = RFE(RandomForestClassifier(n_estimators=100, random_state=42),
n_features_to_select=40)
select.fit(X_train, y_train)
X_train_rfe = select.transform(X_train)
X_test_rfe = select.transform(X_test)
score = LogisticRegression(max_iter=1000).fit(X_train_rfe, y_train).score(X_test_rfe, y_test)
print("Test score: {:.3f}".format(score))
print("Test score: {:.3f}".format(select.score(X_test, y_test))) ## Result of RandomForest
```
# Job Applications Recommendation System
Let me step through a demo of a machine learning model that recommends relevant job listings to job applicants. For this demo, we use the publicly available [Australian job listings data from Seek job board](https://www.kaggle.com/PromptCloudHQ/australian-job-listings-data-from-seek-job-board/downloads/australian-job-listings-data-from-seek-job-board.zip/1) dataset.
Download the data from the link above and place it in a path accessible to this notebook.
Now we perform exploratory data analysis on the dataset.
```
!unzip australian-job-listings-data-from-seek-job-board.zip
!chmod 755 seek_australia_sample.csv
# Generic imports
import pandas as pd
import numpy as np
df = pd.read_csv('seek_australia_sample.csv', encoding='latin1')
print(f'Our data set has {df.shape[0]} records and {df.shape[1]} features or columns.')
# Identify initial records in the data
df.head()
print('Checking the data consistency')
df.isnull().sum()
```
From the output above, most features appear clean, but the **job_description** feature, which is key for building our recommendation model, has a lot of empty or null values.
For the sake of this demo we shall proceed with the data as is. When we build an actual recommendation system, however, we need to ensure this key field is captured for every job listing.
**salary_offered** and **state** are also missing many records, so let us drop these two features completely.
```
df.drop(columns=['state','salary_offered'], inplace=True)
df.dropna(inplace=True)
print(f'After removing empty records our data set has {df.shape[0]} records and {df.shape[1]} features or columns.')
df.isnull().sum()
```
### Jaccard Similarity
To build our recommendation system, we compare each job posting with the summary or skill set uploaded by the job applicant. For this we use the commonly used text-similarity metric **Jaccard similarity**.
Jaccard similarity, or intersection over union, is defined as the size of the intersection divided by the size of the union of two sets. Because both texts are reduced to sets of tokens, repeated words in the job description or the applicant's summary do not inflate the score.
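As a quick illustration (not part of the original dataset work): for two token sets A and B the score is |A ∩ B| / |A ∪ B|, and the toy snippet below computes it for two made-up skill sets.
```
# Toy illustration of Jaccard similarity on two made-up token sets
a = {"python", "sql", "finance"}
b = {"sql", "finance", "audit"}
jaccard = len(a & b) / len(a | b)
print(jaccard)  # 2 shared tokens out of 4 distinct ones -> 0.5
```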
### Text processing using NLTK
Before we run Jaccard similarity on our data, we have to further clean up the text.
Cleaning of the text data is done with the help of the Natural Language Toolkit (NLTK) library.
```
!pip install --upgrade pip
!pip install -U nltk
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
import string
table = str.maketrans('','', string.punctuation)
from nltk.tokenize import word_tokenize # Word Tokenizer
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words = set(stop_words)
from nltk.stem.wordnet import WordNetLemmatizer # Word Lemmatizer
lemmatizer = WordNetLemmatizer()
def clean_text(text):
"""
Cleaning the document before vectorization.
"""
# Tokenize by word
tokens = word_tokenize(text)
# Make all words lowercase
lowercase_tokens = [w.lower() for w in tokens]
# Strip punctuation from within words
no_punctuation = [x.translate(table) for x in lowercase_tokens]
# Remove words that aren't alphabetic
alphabetic = [word for word in no_punctuation if word.isalpha()]
# Remove stopwords
no_stop_words = [w for w in alphabetic if not w in stop_words]
# Lemmatize words
lemmas = [lemmatizer.lemmatize(word) for word in no_stop_words]
return lemmas
# Clean up the text
df['cleaned_text'] = df.job_description.apply(clean_text)
```
### Job Applicant Input
The cell below contains sample user information that will be used as input to the recommendation system.
Since job descriptions can overlap across different job titles, we ask the user to also provide the specific title they are looking for.
```
# 1st Sample User Information
user_title = "Business Analyst"
user_info_summary = """Detail-oriented and proactive Business Analyst with a history of involvement in IT, supply chain and CRM projects.
Always positive, team-focused and actively striving to build my domain knowledge and technical skills to deliver successful outcomes in complex project environments.
BA techniques and underlying competencies include:
Requirements elicitation and documentation
User stories, use cases, feature mapping
Process modelling as-is & to-be
Scope analysis
Stakeholder management
Interviews
Requirements traceability
Data mapping
System testing
Business integration
User guides & training
Problem solving
Creative thinking
Teamwork
Excellent written and oral communication
Facilitation
Leadership
Mentoring (IBL students)"""
# Clean up the user input
cleaned_user_summary = clean_text(user_info_summary)
def get_jaccard_sim(str1, str2):
a = set(str1)
b = set(str2)
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
df_match_by_title = df[df['job_title']==user_title].copy()
df_match_by_title['jaccard_sim_value'] = df_match_by_title.cleaned_text.apply(get_jaccard_sim, args=(cleaned_user_summary,))
sort_by_jaccard_sim = df_match_by_title.sort_values('jaccard_sim_value', ascending=False)
sort_by_jaccard_sim.head(5)
```
Based on the job applicant's input, we can make our model more specific.
Here the user has provided a preferred job type in addition to the job title; we use it to filter the listings down to only the relevant jobs.
```
# 2nd User Information
user_preferred_job_type = "Full Time"
user_title = "Senior Accountant"
user_info_summary = """Seasoned finance professional with 7 years of accelerating career in management finance and accounting and analytical roles:
Proficient in overall financial management & analysis, revenue recognition, accrual accounting, Statutory audit, US GAAP reporting, SOX compliance, intercompany consolidation, risk mitigation, fiscal planning, budgeting and reporting, tax strategies;
Hold a Certification in Accounting from California, Bachelors degree and hands on with , NetSuite ERP, SAP Finance, Xero, MYOB and QuickBooks;
Experience working for listed multinational companies with internal controls in various geographic locations brings immense cultural exposure, leadership and a charismatic personality. """
# Clean up the user input
cleaned_user_summary = clean_text(user_info_summary)
df_match = df[(df['job_title'] == user_title) & (df['job_type']==user_preferred_job_type)].copy()
df_match['jaccard_sim_value'] = df_match.cleaned_text.apply(get_jaccard_sim, args=(cleaned_user_summary,))
sort_by_jaccard_sim = df_match.sort_values('jaccard_sim_value', ascending=False)
sort_by_jaccard_sim.head(5)
```
### Demo Conclusion
Similarly, we can add more filters to the job recommendations (see the sketch below).
If the job applicant has provided feedback on earlier recommendations, we could also use that input to build recommendations based on collaborative filtering. That would be a natural next step, depending on the data available.
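As an illustration only: assuming the listings also carried a location column such as `city` (hypothetical here; check the real column names with `df.columns`), an extra filter could be chained exactly like `job_type`:
```
# 'city' is a hypothetical column -- verify with df.columns before using
user_preferred_city = "Sydney"
df_match = df[
    (df['job_title'] == user_title)
    & (df['job_type'] == user_preferred_job_type)
    & (df['city'] == user_preferred_city)
].copy()
df_match['jaccard_sim_value'] = df_match.cleaned_text.apply(get_jaccard_sim, args=(cleaned_user_summary,))
df_match.sort_values('jaccard_sim_value', ascending=False).head(5)
```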
```
#|default_exp audio.mixup
```
# MixUp & Friends for Audio
> Apply MixUp, CutMix, and a combination of both to audio waveforms before converting them to a Spectrogram or MelSpectrogram
```
#|export
from __future__ import annotations
from torch.distributions.beta import Beta
from fastcore.transform import Pipeline
from fastai.callback.mixup import MixHandler, reduce_loss
from fastai.layers import NoneReduce
from fastxtend.audio.data import MelSpectrogram, Spectrogram
from fastxtend.audio.augment import AmplitudeToDB, AudioNormalize
from fastxtend.imports import *
#|hide
from nbdev.showdoc import *
```
## AudioMixHandler -
```
#|export
class AudioMixHandler(MixHandler):
"Mixup base for `TensorAudio`"
def __init__(self, alpha=0.5, stack_y=True):
super().__init__(alpha)
self.stack_y = stack_y
def before_fit(self):
waveforms, wave, spec = True, [], []
self._wave_pipe = Pipeline([])
self._spec_pipe = Pipeline([])
# first copy transforms
self._orig_pipe = self.dls.train.after_batch
# loop through existing transforms appending to pre_spec/post_spec until Spec/Mel is found
for i in range(len(self.dls.train.after_batch.fs)):
if isinstance(self.dls.train.after_batch[i], (Spectrogram, MelSpectrogram)):
waveforms = False
if waveforms: wave.append(self.dls.train.after_batch[i])
else: spec.append(self.dls.train.after_batch[i])
self._wave_pipe.add(wave)
self._spec_pipe.add(spec)
# set existing transforms to an empty Pipeline
self.dls.train.after_batch = Pipeline([])
def before_train(self):
if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
def after_train(self):
if self.stack_y: self.learn.loss_func = self.old_lf
def after_fit(self):
self.dls.train.after_batch = self._orig_pipe
def after_cancel_fit(self):
self.after_fit()
super().after_cancel_fit()
```
## AudioMixUp -
```
#|export
class AudioMixUp(AudioMixHandler):
"Implementation of https://arxiv.org/abs/1710.09412 for `TensorAudio`"
def __init__(self, alpha=0.5, stack_y=True):
super().__init__(alpha, stack_y)
def before_batch(self, wave=True):
if wave: self.learn.xb = self._wave_pipe(self.xb)
lam = self.distrib.sample((self.y.size(0),)).squeeze().to(self.x.device)
lam = torch.stack([lam, 1-lam], 1)
self.lam = lam.max(1)[0]
shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
nx_dims = len(self.x.size())
self.learn.xb = tuple(L(xb1,self.xb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=nx_dims-1)))
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
self.learn.xb = self._spec_pipe(self.xb)
```
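To make the waveform-level mixing concrete, here is a small standalone sketch (batch shape and the alpha value are illustrative, not taken from the library) of the interpolation that `AudioMixUp.before_batch` performs with `torch.lerp`:
```
import torch
from torch.distributions.beta import Beta

# Illustrative batch of fake waveforms: (batch, channels, samples)
bs, channels, samples = 8, 1, 16000
x = torch.randn(bs, channels, samples)

lam = Beta(torch.tensor(0.4), torch.tensor(0.4)).sample((bs,))
lam = torch.stack([lam, 1 - lam], dim=1).max(dim=1)[0]   # keep the larger weight, as the callback does
shuffle = torch.randperm(bs)

# lerp(a, b, w) = a + w * (b - a): each waveform is blended with a shuffled partner
mixed = torch.lerp(x[shuffle], x, lam.view(-1, 1, 1))
print(mixed.shape)  # torch.Size([8, 1, 16000])
```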
## AudioCutMix -
```
#|export
class AudioCutMix(AudioMixHandler):
"Implementation of https://arxiv.org/abs/1710.09412 for `TensorAudio`"
def __init__(self, alpha=1., stack_y=True):
super().__init__(alpha, stack_y)
def before_batch(self, wave=True):
if wave: self.learn.xb = self._wave_pipe(self.xb)
bs, _, X = self.x.size()
self.lam = self.distrib.sample((1,)).to(self.x.device)
shuffle = torch.randperm(bs).to(self.x.device)
xb1,self.yb1 = self.x[shuffle], tuple((self.y[shuffle],))
x1, x2 = self.rand_cut(X, self.lam)
self.learn.xb[0][..., x1:x2] = xb1[..., x1:x2]
self.lam = (1 - (x2-x1)/float(X))
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
self.learn.xb = self._spec_pipe(self.xb)
def rand_cut(self, X, lam):
cut_rat = torch.sqrt(1. - lam).to(self.x.device)
cut_x = torch.round(X * cut_rat).type(torch.long).to(self.x.device)
cut_x = torch.div(cut_x, 2, rounding_mode='floor')
# uniform
cx = torch.randint(0, X, (1,)).to(self.x.device)
x1 = torch.clamp(cx - cut_x, 0, X)
x2 = torch.clamp(cx + cut_x, 0, X)
return x1, x2
```
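Likewise, a rough standalone sketch (shapes again made up) of the cut-and-paste step in `AudioCutMix.before_batch` and `rand_cut`: a random time window is copied in from a shuffled partner, and the mixing weight is recomputed from the actual window length:
```
import torch

bs, channels, samples = 4, 1, 16000
x = torch.randn(bs, channels, samples)

lam = torch.distributions.Beta(1.0, 1.0).sample()          # fraction of the original signal to keep
half_cut = int(round(samples * torch.sqrt(1 - lam).item())) // 2
cx = torch.randint(0, samples, (1,)).item()                 # centre of the cut window
x1, x2 = max(cx - half_cut, 0), min(cx + half_cut, samples)

shuffle = torch.randperm(bs)
x[..., x1:x2] = x[shuffle][..., x1:x2]                      # paste the partner's segment in place
lam_adjusted = 1 - (x2 - x1) / samples                      # actual weight after clamping to the signal edges
print(x1, x2, lam_adjusted)
```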
## AudioCutMixUp -
```
#|export
class AudioCutMixUp(AudioMixUp, AudioCutMix):
"Implementation of Mixup or CutMix for `TensorAudio`"
def __init__(self, mix_alpha=.4, cut_alpha=1., stack_y=True, cut_ratio=1, mix_ratio=1):
AudioMixUp.__init__(self, mix_alpha, stack_y)
AudioCutMix.__init__(self, cut_alpha, stack_y)
self.mix_distrib = Beta(tensor(mix_alpha), tensor(mix_alpha))
self.cut_distrib = Beta(tensor(cut_alpha), tensor(cut_alpha))
self.ratio = mix_ratio / (cut_ratio + mix_ratio)
def before_batch(self):
if torch.rand(1) <= self.ratio: #mixup
self.distrib = self.mix_distrib
AudioMixUp.before_batch(self)
else:
self.distrib = self.cut_distrib
AudioCutMix.before_batch(self)
```
## AudioCutMixUpAugment -
```
#|export
class AudioCutMixUpAugment(AudioMixUp, AudioCutMix):
"Implementation of Mixup, CutMix, or Augment for `TensorAudio`"
def __init__(self, mix_alpha=.4, cut_alpha=1., stack_y=True, aug_ratio=1, cut_ratio=1, mix_ratio=1, augs_only=None, wave_augs=False):
AudioMixUp.__init__(self, mix_alpha, stack_y)
AudioCutMix.__init__(self, cut_alpha, stack_y)
self.mix_distrib = Beta(tensor(mix_alpha), tensor(mix_alpha))
self.cut_distrib = Beta(tensor(cut_alpha), tensor(cut_alpha))
self.aug_cutmix_ratio = aug_ratio / (aug_ratio + cut_ratio + mix_ratio)
if self.aug_cutmix_ratio == 1: self.cut_mix_ratio = 0
else: self.cut_mix_ratio = mix_ratio / (cut_ratio + mix_ratio)
self.augs_only = augs_only
self.wave_augs = wave_augs
def before_fit(self):
if self.augs_only is None: self.augs_only = (self.learn.n_epoch + 1)/self.learn.n_epoch
elif self.augs_only >=1: self.augs_only = self.augs_only/self.learn.n_epoch
else: self.augs_only = self.augs_only
waveforms, wave, spec, norm = True, [], [], []
self._wave_pipe = Pipeline([])
self._spec_pipe = Pipeline([])
self._norm_pipe = Pipeline([])
# first copy transforms
self._orig_pipe = self.dls.train.after_batch
self._orig_pipe.split_idx = 0 # need to manually set split_idx for training augmentations to run
# loop through existing transforms appending to pre_spec/post_spec until Spec/Mel is found
for i in range(len(self.dls.train.after_batch.fs)):
if isinstance(self.dls.train.after_batch[i], (Spectrogram, MelSpectrogram)):
waveforms = False
if waveforms:
wave.append(self.dls.train.after_batch[i])
else:
if isinstance(self.dls.train.after_batch[i], (AmplitudeToDB, AudioNormalize)):
norm.append(self.dls.train.after_batch[i])
elif isinstance(self.dls.train.after_batch[i], (Spectrogram, MelSpectrogram)):
spec.append(self.dls.train.after_batch[i])
self._wave_pipe.add(wave)
self._spec_pipe.add(spec)
self._norm_pipe.add(norm)
# set existing transforms to an empty Pipeline
self.dls.train.after_batch = Pipeline([])
def before_batch(self):
if self.augs_only >= self.learn.pct_train and torch.rand(1) >= self.aug_cutmix_ratio: # augs or mixup/cutmix
self._aug = False
if self.cut_mix_ratio > 0 and torch.rand(1) <= self.cut_mix_ratio: # mixup or cutmix
self.distrib = self.mix_distrib
AudioMixUp.before_batch(self, self.wave_augs)
else:
self.distrib = self.cut_distrib
AudioCutMix.before_batch(self, self.wave_augs)
self.learn.xb = self._norm_pipe(self.xb) # now normalize
else:
self._aug = True
self.learn.xb = self._orig_pipe(self.xb) # original transforms
def after_cancel_fit(self):
self.after_fit()
AudioMixUp.after_cancel_fit(self)
def lf(self, pred, *yb):
if not self.training or self._aug: return self.old_lf(pred, *yb)
with NoneReduce(self.old_lf) as lf:
loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
```
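Finally, a hedged usage sketch rather than a definitive recipe: `dls` and `model` below are placeholders assumed to be defined elsewhere (a fastai `DataLoaders` whose training `after_batch` pipeline contains a `MelSpectrogram`, and a network that accepts spectrogram input); the callback is then passed to a `Learner` like any other callback:
```
from fastai.learner import Learner
from fastai.metrics import accuracy
import torch.nn as nn

# `dls` and `model` are placeholders assumed to be defined elsewhere
learn = Learner(
    dls, model,
    loss_func=nn.CrossEntropyLoss(),
    metrics=accuracy,
    cbs=AudioCutMixUpAugment(mix_alpha=0.4, cut_alpha=1., aug_ratio=1, cut_ratio=1, mix_ratio=1),
)
learn.fit(10, lr=1e-3)
```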
# Principal Component Analysis (PCA)
We will first implement the PCA algorithm and then apply it (once again) to the MNIST digit dataset.
## Learning objective
1. Write code that implements PCA.
2. Write code that implements PCA for high-dimensional datasets
Let's first import the packages we need for this week.
```
# PACKAGE: DO NOT EDIT THIS CELL
import numpy as np
import timeit
# PACKAGE: DO NOT EDIT THIS CELL
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
from ipywidgets import interact
from load_data import load_mnist
MNIST = load_mnist()
images, labels = MNIST['data'], MNIST['target']
%matplotlib inline
```
Now, let's plot a digit from the dataset:
```
plt.figure(figsize=(4,4))
plt.imshow(images[0].reshape(28,28), cmap='gray');
```
Before we implement PCA, we will need to do some data preprocessing. In this assessment, some of these steps
will be implemented by you, and others we will take care of. However, when you are working on real-world problems, you will need to do all of these steps yourself!
The preprocessing steps we will do are
1. Convert the unsigned 8-bit integer (uint8) encoding of the pixels to floating point numbers between 0 and 1.
2. Subtract from each image the mean $\boldsymbol \mu$.
3. Scale each dimension of each image by $\frac{1}{\sigma}$, where $\sigma$ is the standard deviation.
The steps above ensure that our images will have zero mean and unit variance. These preprocessing
steps are also known as [Data Normalization or Feature Scaling](https://en.wikipedia.org/wiki/Feature_scaling).
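As a quick standalone illustration (the numbers are made up), steps 2 and 3 amount to z-scoring each pixel dimension:
```
import numpy as np

# Toy "dataset": 2 images with 3 pixels each, already converted to floats in [0, 1]
X = np.array([[0.0, 0.2, 0.4],
              [1.0, 0.6, 0.4]])
mu = X.mean(axis=0)
sigma = X.std(axis=0)
sigma[sigma == 0] = 1.0            # avoid dividing by zero for constant pixels
X_normalized = (X - mu) / sigma
print(X_normalized.mean(axis=0))   # ~0 for every pixel
print(X_normalized.std(axis=0))    # 1 for varying pixels, 0 for the constant one
```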
## 1. PCA
Now we will implement PCA. Before we do that, let's pause for a moment and
think about the steps for performing PCA. Assume that we are performing PCA on
some dataset $\boldsymbol X$ for $M$ principal components.
We then need to perform the following steps, which we break into parts:
1. Data normalization (`normalize`).
2. Find eigenvalues and corresponding eigenvectors for the covariance matrix $S$.
Sort by the largest eigenvalues and the corresponding eigenvectors (`eig`).
After these steps, we can then compute the projection and reconstruction of the data onto the space spanned by the top $M$ eigenvectors.
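Concretely, if $\boldsymbol B$ collects the top $M$ eigenvectors as columns, the projection matrix and the reconstruction computed below are
$$\boldsymbol P = \boldsymbol B(\boldsymbol B^\top \boldsymbol B)^{-1}\boldsymbol B^\top, \qquad \tilde{\boldsymbol x} = \boldsymbol P \boldsymbol x,$$
and for an orthonormal $\boldsymbol B$ the factor $\boldsymbol B^\top \boldsymbol B$ is just the identity.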
```
# GRADED FUNCTION: DO NOT EDIT THIS LINE
def normalize(X):
"""Normalize the given dataset X
Args:
X: ndarray, dataset
Returns:
(Xbar, mean, std): tuple of ndarray, Xbar is the normalized dataset
with mean 0 and standard deviation 1; mean and std are the
mean and standard deviation respectively.
Note:
You will encounter dimensions where the standard deviation is
zero, for those when you do normalization the normalized data
will be NaN. Handle this by setting using `std = 1` for those
dimensions when doing normalization.
"""
mu = np.mean(X, axis = 0) # <-- EDIT THIS, compute the mean of X
std = np.std(X, axis=0)
std_filled = std.copy()
std_filled[std==0] = 1.
Xbar = (X - mu)/std_filled # <-- EDIT THIS, compute the normalized data Xbar
return Xbar, mu, std
def eig(S):
"""Compute the eigenvalues and corresponding eigenvectors
for the covariance matrix S.
Args:
S: ndarray, covariance matrix
Returns:
(eigvals, eigvecs): ndarray, the eigenvalues and eigenvectors
Note:
the eigenvals and eigenvecs should be sorted in descending
order of the eigen values
"""
eigvals, eigvecs = np.linalg.eig(S)
idx = eigvals.argsort()[::-1]
eigvals = eigvals[idx]
eigvecs = eigvecs[:,idx]
return (eigvals, eigvecs) # <-- EDIT THIS to return the eigenvalues and corresponding eigenvectors
def projection_matrix(B):
"""Compute the projection matrix onto the space spanned by `B`
Args:
B: ndarray of dimension (D, M), the basis for the subspace
Returns:
P: the projection matrix
"""
return (B@(np.linalg.inv(B.T@B))@B.T) # <-- EDIT THIS to compute the projection matrix
def PCA(X, num_components):
"""
Args:
X: ndarray of size (N, D), where D is the dimension of the data,
and N is the number of datapoints
num_components: the number of principal components to use.
Returns:
X_reconstruct: ndarray of the reconstruction
of X from the first `num_components` principal components.
"""
X, mean, std = normalize(X)
S = np.cov(X, rowvar=False, bias=True)
_, eigvecs = eig(S)
P = projection_matrix(eigvecs[:, :num_components])
X = (P@X.T).T
# your solution should take advantage of the functions you have implemented above.
return X # <-- EDIT THIS to return the reconstruction of X
## Some preprocessing of the data
NUM_DATAPOINTS = 1000
X = (images.reshape(-1, 28 * 28)[:NUM_DATAPOINTS]) / 255.
Xbar, mu, std = normalize(X)
for num_component in range(1, 20):
from sklearn.decomposition import PCA as SKPCA
# We can compute a standard solution given by scikit-learn's implementation of PCA
pca = SKPCA(n_components=num_component, svd_solver='full')
sklearn_reconst = pca.inverse_transform(pca.fit_transform(Xbar))
reconst = PCA(Xbar, num_component)
np.testing.assert_almost_equal(reconst, sklearn_reconst)
print(np.square(reconst - sklearn_reconst).sum())
```
The greater the number of principal components we use, the smaller our reconstruction
error will be. Now, let's answer the following question:
> How many principal components do we need
> in order to reach a Mean Squared Error (MSE) of less than $100$ for our dataset?
We have provided a function in the next cell which computes the mean squared error (MSE), which will be useful for answering the question above.
```
def mse(predict, actual):
"""Helper function for computing the mean squared error (MSE)"""
return np.square(predict - actual).sum(axis=1).mean()
loss = []
reconstructions = []
# iterate over different number of principal components, and compute the MSE
for num_component in range(1, 100):
reconst = PCA(Xbar, num_component)
error = mse(reconst, Xbar)
reconstructions.append(reconst)
# print('n = {:d}, reconstruction_error = {:f}'.format(num_component, error))
loss.append((num_component, error))
reconstructions = np.asarray(reconstructions)
reconstructions = reconstructions * std + mu # "unnormalize" the reconstructed image
loss = np.asarray(loss)
import pandas as pd
# create a table showing the number of principal components and MSE
pd.DataFrame(loss).head()
```
We can also put these numbers into perspective by plotting them.
```
fig, ax = plt.subplots()
ax.plot(loss[:,0], loss[:,1]);
ax.axhline(100, linestyle='--', color='r', linewidth=2)
ax.xaxis.set_ticks(np.arange(1, 100, 5));
ax.set(xlabel='num_components', ylabel='MSE', title='MSE vs number of principal components');
```
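To read the answer off directly (a small helper, assuming, as the plot above confirms, that the MSE decreases monotonically with the number of components), we can query the `loss` array computed earlier:
```
# smallest number of principal components whose MSE is below 100
num_needed = int(loss[loss[:, 1] < 100][0, 0])
print(num_needed)
```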
But _numbers don't tell us everything_! Just what does it mean _qualitatively_ for the loss to decrease from around
$450.0$ to less than $100.0$?
Let's find out! In the next cell, the leftmost image is the original digit; to its right we show reconstructions of that image using an increasing number of principal components (from 1 up to 9).
```
@interact(image_idx=(0, 1000))
def show_num_components_reconst(image_idx):
fig, ax = plt.subplots(figsize=(20., 20.))
actual = X[image_idx]
# concatenate the actual and reconstructed images as large image before plotting it
x = np.concatenate([actual[np.newaxis, :], reconstructions[:, image_idx]]).astype(dtype=float)
ax.imshow(np.hstack(x.reshape(-1, 28, 28)[np.arange(10)]),
cmap='gray');
ax.axvline(28, color='orange', linewidth=2)
```
We can also browse through the reconstructions for other digits. Once again, `interact` comes in handy for visualizing the reconstruction.
```
@interact(i=(0, 10))
def show_pca_digits(i=1):
"""Show the i th digit and its reconstruction"""
plt.figure(figsize=(4,4))
actual_sample = X[i].reshape(28,28)
reconst_sample = (reconst[i, :] * std + mu).reshape(28, 28).astype(dtype=float)
plt.imshow(np.hstack([actual_sample, reconst_sample]), cmap='gray')
plt.show()
```
## 2. PCA for high-dimensional datasets
Sometimes, the dimensionality of our dataset may be larger than the number of samples we
have. Then it might be inefficient to perform PCA with your implementation above. Instead,
as mentioned in the lectures, you can implement PCA in a more efficient manner, which we
call "PCA for high dimensional data" (PCA_high_dim).
Below are the steps for performing PCA for a high-dimensional dataset:
1. Compute the matrix $XX^T$ (an $N$ by $N$ matrix with $N \ll D$).
2. Compute the eigenvalues $\lambda$ and eigenvectors $V$ of $XX^T$.
3. Compute the eigenvectors of the original covariance matrix as $X^TV$. This works because if $XX^T\boldsymbol v = \lambda \boldsymbol v$, then left-multiplying by $X^T$ gives $(X^TX)(X^T\boldsymbol v) = \lambda (X^T\boldsymbol v)$, so $X^T\boldsymbol v$ is an eigenvector of $X^TX$ with the same eigenvalue. Choose the eigenvectors associated with the $M$ largest eigenvalues to be the basis of the principal subspace $U$.
4. Compute the orthogonal projection of the data onto the subspace spanned by the columns of $U$. Functions you wrote for earlier assignments will be useful.
```
# GRADED FUNCTION: DO NOT EDIT THIS LINE
### PCA for high dimensional datasets
def PCA_high_dim(X, n_components):
"""Compute PCA for small sample size but high-dimensional features.
Args:
X: ndarray of size (N, D), where D is the dimension of the sample,
and N is the number of samples
        n_components: the number of principal components to use.
Returns:
X_reconstruct: (N, D) ndarray. the reconstruction
            of X from the first `n_components` principal components.
"""
N, D = X.shape
M = (X @ X.T)/N
_, eigvecs = eig(M)
U = (X.T@eigvecs)[:,:n_components]
P = projection_matrix(U)
X = (P@X.T).T
return X # <-- EDIT THIS to return the reconstruction of X
```
Given the same dataset, `PCA_high_dim` and `PCA` should give identical results.
Assuming we have implemented `PCA` correctly, we can use this __invariant__ to test our implementation of `PCA_high_dim`.
```
np.testing.assert_almost_equal(PCA(Xbar, 2), PCA_high_dim(Xbar, 2))
```
Now let's compare the running time between `PCA` and `PCA_high_dim`.
__Tips__ for running benchmarks or computationally expensive code:
When you have some computation that takes a non-negligible amount of time, try separating
the code that produces the output from the code that analyzes the result (e.g. plotting the results, computing statistics of the results). That way, you don't have to recompute everything when you want to produce more analysis.
The next cell defines a function that records the time taken to execute a function `f`, averaged over `repeat` runs. You do not need to modify it, but you can use it to compare the running times of any functions you are interested in.
```
def time(f, repeat=10):
times = []
for _ in range(repeat):
start = timeit.default_timer()
f()
stop = timeit.default_timer()
times.append(stop-start)
return np.mean(times), np.std(times)
```
We first benchmark the time taken to compute $\boldsymbol X^T\boldsymbol X$ and $\boldsymbol X\boldsymbol X^T$. Jupyter's magic command `%time` is quite handy.
The next cell measures the running time of computing $X^TX$ and $XX^T$ for datasets of different size.
```
times_mm0 = []
times_mm1 = []
# iterate over datasets of different size
for datasetsize in np.arange(4, 784, step=20):
XX = Xbar[:datasetsize] # select the first `datasetsize` samples in the dataset
# record the running time for computing X.T @ X
mu, sigma = time(lambda : XX.T @ XX)
times_mm0.append((datasetsize, mu, sigma))
# record the running time for computing X @ X.T
mu, sigma = time(lambda : XX @ XX.T)
times_mm1.append((datasetsize, mu, sigma))
times_mm0 = np.asarray(times_mm0)
times_mm1 = np.asarray(times_mm1)
```
Having recorded the running times for computing `X.T @ X` and `X @ X.T`, we can plot them.
```
fig, ax = plt.subplots()
ax.set(xlabel='size of dataset', ylabel='running time')
bar = ax.errorbar(times_mm0[:, 0], times_mm0[:, 1], times_mm0[:, 2], label="$X^T X$ (PCA)", linewidth=2)
ax.errorbar(times_mm1[:, 0], times_mm1[:, 1], times_mm1[:, 2], label="$X X^T$ (PCA_high_dim)", linewidth=2)
ax.legend();
```
Alternatively, we can use the `%time` magic command for benchmarking.
```
%time Xbar.T @ Xbar
%time Xbar @ Xbar.T
pass # Put this here so that our output does not show result of computing `Xbar @ Xbar.T`
```
Next, we benchmark `PCA` and `PCA_high_dim` themselves.
```
times0 = []
times1 = []
# iterate over datasets of different size
for datasetsize in np.arange(4, 784, step=100):
XX = Xbar[:datasetsize]
npc = 2
mu, sigma = time(lambda : PCA(XX, npc), repeat=10)
times0.append((datasetsize, mu, sigma))
mu, sigma = time(lambda : PCA_high_dim(XX, npc), repeat=10)
times1.append((datasetsize, mu, sigma))
times0 = np.asarray(times0)
times1 = np.asarray(times1)
```
Let's plot the running time. Spend some time thinking about what this plot means. We mentioned in the lectures that `PCA_high_dim` is advantageous when
the dataset size $N$ is smaller than the data dimension $D$. Although the two running-time curves in our plot do not intersect exactly at $N = D$, they do show the trend.
```
fig, ax = plt.subplots()
ax.set(xlabel='number of datapoints', ylabel='run time')
ax.errorbar(times0[:, 0], times0[:, 1], times0[:, 2], label="PCA", linewidth=2)
ax.errorbar(times1[:, 0], times1[:, 1], times1[:, 2], label="PCA_high_dim", linewidth=2)
ax.legend();
```
Again, with the magic command `%time`.
```
%time PCA(Xbar, 2)
%time PCA_high_dim(Xbar, 2)
pass
```
```
# get data
!wget -O surnames.txt -qq --no-check-certificate "https://drive.google.com/uc?export=download&id=1ji7dhr9FojPeV51dDlKRERIqr3vdZfhu"
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score, classification_report
if torch.cuda.is_available():
from torch.cuda import FloatTensor, LongTensor
else:
from torch import FloatTensor, LongTensor
SEED = 41
np.random.seed(SEED)
torch.cuda.manual_seed(SEED)
torch.manual_seed(SEED)
data, labels = [], []
with open('surnames.txt') as f:
for line in f:
surname, lang = line.strip().split('\t')
data.append(surname)
labels.append(lang)
for i in np.random.randint(0, len(data), 10):
print(data[i], labels[i])
```
## Split into training and test sets
```
from sklearn.model_selection import train_test_split
data_train, data_test, labels_train, labels_test = train_test_split(
data, labels, test_size=0.3, stratify=labels, random_state=42
)
```
## Data Preprocessing
```
symbols = set(symb for word in data_train for symb in word)
char2ind = {symb: ind + 1 for ind, symb in enumerate(symbols)}  # indices start at 1 ...
char2ind[''] = 0  # ... because index 0 is reserved for padding / unknown characters
# prepare index for labels
lang2ind = {lang: ind for ind, lang in enumerate(set(labels_train))}
print(lang2ind)
```
### Batch Preparation
```
def iterate_batches(data,
labels,
char2ind,
lang2ind,
batch_size
):
labels = np.array([lang2ind[label] for label in labels])
    data = np.array([[char2ind.get(symb, 0) for symb in word] for word in data], dtype=object)  # ragged sequences -> object array
indices = np.arange(len(data))
np.random.shuffle(indices)
for start in range(0, len(data), batch_size):
end = min(start + batch_size, len(data))
batch_indices = indices[start: end]
max_word_len = max(len(data[ind]) for ind in batch_indices)
X = np.zeros((max_word_len, len(batch_indices)))
for i, ind in enumerate(batch_indices):
X[:len(data[ind]), i] = data[ind]
yield X, labels[batch_indices]
from functools import partial
iterate_batches = partial(iterate_batches,
char2ind=char2ind,
lang2ind=lang2ind
)
next(iterate_batches(data, labels, batch_size=8))
```
### Model Definitions
```
class SimpleRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self._hidden_size = hidden_size
self.linear_layer = nn.Linear(self._hidden_size + input_size, self._hidden_size)
def forward(self, inputs, hidden=None):
seq_len, batch_size = inputs.shape[:2]
if hidden is None:
hidden = inputs.new_zeros((batch_size), self._hidden_size)
for i in range(seq_len):
formatted_input = torch.cat((hidden, inputs[i]), dim=1)
hidden = self.linear_layer(formatted_input)
return hidden
class MemorizerModel(nn.Module):
def __init__(self, hidden_size):
super().__init__()
input_size = 10
self.embedding = nn.Embedding(input_size, input_size)
self.embedding.weight = nn.Parameter(torch.eye(input_size))
self.model = nn.Sequential(self.embedding,
SimpleRNN(input_size, hidden_size),
nn.Linear(hidden_size, input_size)
)
def forward(self, inputs):
return self.model(inputs)
```
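As a quick sanity check (not in the original notebook), `SimpleRNN` maps a `(seq_len, batch, input_size)` tensor of embeddings to a `(batch, hidden_size)` tensor holding the final hidden state; the dummy sizes below are arbitrary:
```
_rnn_check = SimpleRNN(input_size=10, hidden_size=16)
_dummy = torch.zeros(7, 3, 10)   # seq_len=7, batch=3, input_size=10
print(_rnn_check(_dummy).shape)  # expected: torch.Size([3, 16])
```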
### Train MemorizerModel
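The training cell below relies on a `generate_data` helper and on validation tensors `X_val`, `y_val` that are not defined anywhere in this notebook. A minimal sketch of what such a helper might look like is given here; the assumed task (predicting the first digit of a random digit sequence) and all names are our assumption, not part of the original code:
```
# Hypothetical helper -- not defined in the original notebook.
# Assumed task: given a sequence of random digits, predict the FIRST digit.
def generate_data(seq_len, batch_size=32):
    X = np.random.randint(0, 10, size=(seq_len, batch_size))
    y = X[0].copy()                                # target: first digit of each sequence
    return torch.as_tensor(X), torch.as_tensor(y)  # int64 tensors on CPU (the model stays on CPU)

X_val, y_val = generate_data(seq_len=25)           # fixed validation batch (assumption)
```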
```
rnn = MemorizerModel(hidden_size=16)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters())
total_loss = 0
epochs_count = 1000
for epoch_ind in range(epochs_count):
X_train, y_train = generate_data(seq_len=25)
optimizer.zero_grad()
rnn.train()
logits = rnn(X_train)
loss = criterion(logits, y_train)
loss.backward()
optimizer.step()
total_loss += loss.item()
if (epoch_ind + 1) % 100 == 0:
rnn.eval()
with torch.no_grad():
logits = rnn(X_val)
val_loss = criterion(logits, y_val)
print('[{}/{}] Train: {:.3f} Val: {:.3f}'.format(epoch_ind + 1, epochs_count,
total_loss / 100, val_loss.item()))
total_loss = 0
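# reference value: cross-entropy of uniform random guessing over 10 classes (about 2.30)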
-np.log(1 / 10)
```
### Surname Classifier
```
class SurnamesClassifier(nn.Module):
def __init__(self,
vocab_size,
emb_dim,
lstm_hidden_dim,
classes_count):
super().__init__()
self.embedding = nn.Embedding(vocab_size, emb_dim)
self.model = nn.Sequential(SimpleRNN(emb_dim,
lstm_hidden_dim),
nn.Linear(lstm_hidden_dim,
classes_count)
)
def forward(self, inputs):
'embed(inputs) -> prediction'
embeddings = self.embed(inputs)
return self.model(embeddings)
def embed(self, inputs):
'inputs -> word embedding'
return self.embedding(inputs)
class LSTMSurnamesClassifier(nn.Module):
def __init__(self,
vocab_size,
emb_dim,
lstm_hidden_dim,
classes_count):
super().__init__()
self.lstm_hidden_dim = lstm_hidden_dim
self.embedding = nn.Embedding(vocab_size, emb_dim)
self.lstm = nn.LSTM(emb_dim, lstm_hidden_dim)
self.output = nn.Linear(lstm_hidden_dim, classes_count)
self.dropout_layer = nn.Dropout(p=0.3)
def forward(self, inputs):
'embed(inputs) -> prediction'
self.hidden = self.init_hidden(inputs.size(-1))
embeddings = self.embed(inputs)
outputs, (ht, ct) = self.lstm(embeddings, self.hidden)
outputs = self.dropout_layer(ht[-1])
outputs = self.output(outputs)
return outputs
def embed(self, inputs):
'inputs -> word embedding'
return self.embedding(inputs)
def init_hidden(self, batch_size):
return(torch.randn(1, batch_size, self.lstm_hidden_dim).cuda(),
torch.randn(1, batch_size, self.lstm_hidden_dim).cuda())
class LSTMBidirectionalSurnamesClassifier(nn.Module):
def __init__(self,
vocab_size,
emb_dim,
lstm_hidden_dim,
classes_count):
super().__init__()
self.lstm_hidden_dim = lstm_hidden_dim
self.embedding = nn.Embedding(vocab_size, emb_dim)
self.lstm = nn.LSTM(emb_dim, lstm_hidden_dim, num_layers=1, bidirectional=True)
self.output = nn.Linear(lstm_hidden_dim, classes_count)
self.dropout_layer = nn.Dropout(p=0.3)
def forward(self, inputs):
'embed(inputs) -> prediction'
self.hidden = self.init_hidden(inputs.size(-1))
embeddings = self.embed(inputs)
outputs, (ht, ct) = self.lstm(embeddings, self.hidden)
outputs = self.dropout_layer(ht[-1])
outputs = self.output(outputs)
return outputs
def embed(self, inputs):
'inputs -> word embedding'
return self.embedding(inputs)
def init_hidden(self, batch_size):
return(torch.ones(1 * 2, batch_size, self.lstm_hidden_dim).cuda(),
torch.ones(1 * 2, batch_size, self.lstm_hidden_dim).cuda())
```
### Training Helper Methods
```
import math
import time
def do_epoch(model, criterion, data, batch_size, optimizer=None):
epoch_loss = 0.
is_train = not optimizer is None
model.train(is_train)
data, labels = data
batchs_count = math.ceil(len(data) / batch_size)
with torch.autograd.set_grad_enabled(is_train):
for i, (X_batch, y_batch) in enumerate(iterate_batches(data, labels, batch_size=batch_size)):
X_batch, y_batch = LongTensor(X_batch), LongTensor(y_batch)
logits = model(X_batch)
loss = criterion(logits, y_batch)
epoch_loss += loss.item()
if is_train:
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 1.)
optimizer.step()
print('\r[{} / {}]: Loss = {:.4f}'.format(i, batchs_count, loss.item()), end='')
return epoch_loss / batchs_count
def fit(model, criterion, optimizer, train_data, epochs_count=1,
batch_size=32, val_data=None, val_batch_size=None):
if not val_data is None and val_batch_size is None:
val_batch_size = batch_size
for epoch in range(epochs_count):
start_time = time.time()
train_loss = do_epoch(model, criterion, train_data, batch_size, optimizer)
output_info = '\rEpoch {} / {}, Epoch Time = {:.2f}s: Train Loss = {:.4f}'
if not val_data is None:
val_loss = do_epoch(model, criterion, val_data, val_batch_size, None)
epoch_time = time.time() - start_time
output_info += ', Val Loss = {:.4f}'
print(output_info.format(epoch+1, epochs_count, epoch_time, train_loss, val_loss))
else:
epoch_time = time.time() - start_time
print(output_info.format(epoch+1, epochs_count, epoch_time, train_loss))
```
### Training Loop
```
model = SurnamesClassifier(vocab_size=len(char2ind),
emb_dim=16,
lstm_hidden_dim=64,
classes_count=len(lang2ind)).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters())
fit(model,
criterion,
optimizer,
epochs_count=60,
batch_size=128,
train_data=(data_train, labels_train),
val_data=(data_test, labels_test),
val_batch_size=512)
# model = LSTMSurnamesClassifier(vocab_size=len(char2ind),
# emb_dim=16,
# lstm_hidden_dim=64,
# classes_count=len(lang2ind)).cuda()
# criterion = nn.CrossEntropyLoss().cuda()
# optimizer = optim.Adam(model.parameters())
# fit(model,
# criterion,
# optimizer,
# epochs_count=60,
# batch_size=128,
# train_data=(data_train, labels_train),
# val_data=(data_test, labels_test),
# val_batch_size=512)
model = LSTMBidirectionalSurnamesClassifier(vocab_size=len(char2ind),
emb_dim=32,
lstm_hidden_dim=64,
classes_count=len(lang2ind)).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters())
fit(model,
criterion,
optimizer,
epochs_count=60,
batch_size=128,
train_data=(data_train, labels_train),
val_data=(data_test, labels_test),
val_batch_size=512)
def predict_surname(model,
surname,
char2ind):
data = np.array([char2ind.get(symb, 0) for symb in surname])
max_len = len(surname)
X = np.zeros(shape=(max_len, 1))
for i in range(X.shape[1]):
for j, char in enumerate(data):
X[j, i] = char
logits = model(LongTensor(X)).cpu().detach().numpy()
max_logit_index = np.argmax(logits)
return max_logit_index
model.eval()
y_pred = [predict_surname(model, surname, char2ind) for surname in data_test]
y_test = [lang2ind[label] for label in labels_test]
print('Accuracy = {:.2%}'.format(accuracy_score(y_test, y_pred)))
print('Classification report:')
print(classification_report(y_test, y_pred,
target_names=[lang for lang, _ in sorted(lang2ind.items(), key=lambda x: x[1])]))
```
### Draw Vectors
```
import bokeh.models as bm, bokeh.plotting as pl
from bokeh.colors import RGB
from bokeh.io import output_notebook
from sklearn.manifold import TSNE
from sklearn.preprocessing import scale
def draw_vectors(x, y, radius=10, alpha=0.25, color='blue',
width=600, height=400, show=True, **kwargs):
""" draws an interactive plot for data points with auxilirary info on hover """
output_notebook()
if isinstance(color, str):
color = [color] * len(x)
    if isinstance(color, np.ndarray):
        color = [RGB(*x[:3]) for x in color]
data_source = bm.ColumnDataSource({ 'x' : x, 'y' : y, 'color': color, **kwargs })
fig = pl.figure(active_scroll='wheel_zoom', width=width, height=height)
fig.scatter('x', 'y', size=radius, color='color', alpha=alpha, source=data_source)
fig.add_tools(bm.HoverTool(tooltips=[(key, "@" + key) for key in kwargs.keys()]))
if show:
pl.show(fig)
return fig
def get_tsne_projection(word_vectors):
tsne = TSNE(n_components=2, verbose=100)
return scale(tsne.fit_transform(word_vectors))
def visualize_embeddings(embeddings, token, colors):
tsne = get_tsne_projection(embeddings)
draw_vectors(tsne[:, 0], tsne[:, 1], color=colors, token=token)
word_indices = np.random.choice(np.arange(len(data_test)), 1000, replace=False)
words = [data_test[ind] for ind in word_indices]
word_labels = [labels_test[ind] for ind in word_indices]
model.eval()
X_batch, y_batch = next(iterate_batches(words, word_labels, batch_size=1000))
embeddings = model.embed(LongTensor(X_batch)).cpu().detach().numpy()[0]
colors = plt.cm.tab20(y_batch) * 255
visualize_embeddings(embeddings, words, colors)
```
### RNN Visualizer
```
class LSTMVisualizer(nn.Module):
def __init__(self,
vocab_size,
emb_dim,
lstm_hidden_dim,
classes_count):
super().__init__()
self.lstm_hidden_dim = lstm_hidden_dim
self.embedding = nn.Embedding(vocab_size, emb_dim)
self.lstm = nn.LSTM(emb_dim, lstm_hidden_dim)
self.output = nn.Linear(lstm_hidden_dim, classes_count)
self.dropout_layer = nn.Dropout(p=0.3)
def forward(self, inputs):
'embed(inputs) -> prediction'
self.hidden = self.init_hidden(inputs.size(-1))
embeddings = self.embed(inputs)
outputs, (ht, ct) = self.lstm(embeddings, self.hidden)
self.hs = outputs
outputs = self.dropout_layer(ht[-1])
outputs = self.output(outputs)
return outputs
def embed(self, inputs):
'inputs -> word embedding'
return self.embedding(inputs)
def init_hidden(self, batch_size):
return(torch.randn(1, batch_size, self.lstm_hidden_dim).cuda(),
torch.randn(1, batch_size, self.lstm_hidden_dim).cuda())
model = LSTMVisualizer(vocab_size=len(char2ind),
emb_dim=16,
lstm_hidden_dim=64,
classes_count=len(lang2ind)).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters())
fit(model,
criterion,
optimizer,
epochs_count=60,
batch_size=128,
train_data=(data_train, labels_train),
val_data=(data_test, labels_test),
val_batch_size=512)
np.random.choice(np.where(np.array(labels_test) == 'English')[0], size=5)
np.array(data_test)[[4876, 45, 4649, 2273, 2104]]
model.eval()
surname = 'Flannery'
data = np.array([char2ind.get(symb, 0) for symb in surname])
max_len = len(surname)
X = np.zeros(shape=(max_len, 1))
for i in range(X.shape[1]):
X[:len(data), i] = data
logits = model(LongTensor(X)).cpu().detach().numpy()
len(model.hs)
# visualize the LSTM hidden state after each character of the surname
# (each 64-dim hidden state reshaped into an 8x8 image; one panel per time step)
fig, ax = plt.subplots(3, 3, figsize=(6, 6))
count = 0
for i in range(3):
    for j in range(3):
        if count < len(model.hs):
            ax[i][j].imshow(model.hs[count].cpu().detach().numpy().ravel().reshape(8, 8))
        count += 1
plt.tight_layout();
```
### Traditional Methods
```
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import log_loss
model = Pipeline([
('vectorizer', CountVectorizer(analyzer='char', ngram_range=(1, 4))),
('log_regression', LogisticRegression())
])
model.fit(data_train, labels_train)
preds = model.predict(data_test)
preds_prob = model.predict_proba(data_test)
print('Accuracy = {:.2%}'.format(accuracy_score(labels_test, preds)))
print('Log Loss = {:.5f}'.format(log_loss(labels_test, preds_prob)))
print('Classification report:')
print(classification_report(labels_test, preds))
```
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy.stats as stats
mpl.rcParams['figure.dpi'] = 100
mpl.rcParams['figure.figsize'] = (8, 6)
%config InlineBackend.figure_format = 'retina'
SAVE = True
def instantaneous_slope(y, x):
    """Estimate the slope dy/dx at each sample point: one-sided differences at the
    two ends, and the derivative of a quadratic fit through the three neighbouring
    points everywhere else (simpler alternatives: central or backward differences)."""
    slope = np.zeros(len(x))
    for i in range(len(x)):
        if i == 0:
            slope[0] = (y[1] - y[0]) / (x[1] - x[0])
        elif i == len(x) - 1:
            slope[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
        else:
            # fit y = a0*x**2 + a1*x + a2 through (x[i-1], y[i-1]), (x[i], y[i]), (x[i+1], y[i+1])
            xp, xc, xm = x[i + 1], x[i], x[i - 1]
            yp, yc, ym = y[i + 1], y[i], y[i - 1]
            X = np.array([[xp ** 2, xp, 1], [xc ** 2, xc, 1], [xm ** 2, xm, 1]])
            B = np.array([yp, yc, ym])
            a = np.linalg.solve(X, B)
            slope[i] = 2 * a[0] * xc + a[1]  # derivative of the fit evaluated at the central point
    return slope
path_to_results = '/home/ng213/2TB/pazy_code/pazy-aepw3-results/01_Bending/'
output_figures_folder = '../figures_aiaaj/'
bending_results = {}
bending_results['sharpy_w_skin'] = {'file': path_to_results + '/bending_sharpy_w_skin.txt',
'skin': True,
'marker': 'o',
's': 4,
'label': 'Undeformed ref. line (SHARPy)', 'linestyle': {'markevery': 5}}
bending_results['sharpy_wo_skin'] = {'file': path_to_results + '//bending_sharpy_wo_skin.txt',
'skin': False,
'marker': 'o',
's': 4,
'label': 'Undeformed ref. line (SHARPy)', 'linestyle': {'markevery': 5}}
# bending_results['um_w_skin'] = {'file': path_to_results + '/bending_UMNAST_w_skin.txt',
# 'skin': True,
# 'marker': 's',
# 's': 4,
# 'label':'UM/NAST', 'linestyle': {'markevery': 5}}
# bending_results['um_wo_skin'] = {'file': path_to_results + '/bending_UMNAST_wo_skin.txt',
# 'skin': False,
# 'marker': 's',
# 's': 4,
# 'label': 'UM/NAST', 'linestyle': {'markevery': 5}}
bending_results['technion_mrm_w_skin'] = {'file': path_to_results + '/bending_mrm_umbeam_w_skin.txt',
'skin': True,
'marker': '^',
's': 4,
'label': 'Curvature incl. (MRM)',}
bending_results['technion_mrm_wo_skin'] = {'file': path_to_results + '/bending_mrm_umbeam_wo_skin.txt',
'marker': '^',
's': 4,
'skin': False,
'label': 'Curvature incl. (MRM)'}
# bending_results['technion_ansys_w_skin'] = {'file': path_to_results + '/bending_ansys_w_skin.txt',
# 'skin': True,
# 'marker': 's',
# 's': 4,
# 'label': 'MRM Ansys modes', 'linestyle':{'alpha': 0.6}}
# bending_results['technion_ansys_wo_skin'] = {'file': path_to_results + '/bending_ansys_wo_skin.txt',
# 'skin': False,
# 'marker': 's',
# 's': 4,
# 'label': 'MRM Ansys modes', 'linestyle':{'alpha': 0.6}}
# bending_results['nastran'] = {'file': path_to_results + '/bending_UMNAST_parentFEM_wo_skin.txt',
# 'skin': False,
# 'marker': '+',
# 's': 4,
# 'ls': 'none',
# 'label': 'Nastran FEM',
# 'linestyle': {'markevery': 1}}
bending_results['technion_experimental_w_skin'] = {'file': path_to_results + '/bending_technion_experimental_w_skin.txt',
'skin': True,
'label': 'Experimental',
'marker': 'x',
's': 4,
'ls':'none'
}
bending_results['technion_experimental_wo_skin'] = {'file': path_to_results + '/bending_technion_experimental_wo_skin.txt',
'skin': False,
'label': 'Experimental',
'marker': 'x',
's': 4,
'ls':'none'
}
load_linear_limit = 0.9 #kg
for key, case in bending_results.items():
case['data'] = np.loadtxt(case['file'])
    if case['label'] == 'Experimental':  # experimental data: drop the first (offset) point and sort by load
x = case['data'][case['data'][:, 0] < load_linear_limit, 0][1:]
order = np.argsort(x)
x = x[order]
y = case['data'][case['data'][:, 0] < load_linear_limit, 1][1:]
y = y[order]
else:
x = case['data'][case['data'][:, 0] < load_linear_limit, 0]
y = case['data'][case['data'][:, 0] < load_linear_limit, 1]
case['linear'] = stats.linregress(x, y)
cm2in = 1/2.54
ar = 1.57
ar = 3
width_cm = 20
remove_offset = True
figsize = (width_cm * cm2in, width_cm / ar * cm2in)
fig, ax = plt.subplots(ncols=2, figsize=figsize)
for case in bending_results.values():
if case['skin']:
a = ax[0]
else:
a = ax[1]
if case['marker'] == '+':
mfc = 'k'
else:
mfc = 'none'
if remove_offset:
offset = case['data'][0, 1]
else:
offset = 0
a.plot(case['data'][:, 0], case['data'][:, 1] - offset, label=case['label'], marker=case['marker'], ms=case['s'], markeredgecolor='k', mfc=mfc, ls='none',
lw=0.5, color='k', markeredgewidth=0.7,
**case.get('linestyle', {}))
for a in ax:
a.legend(fontsize=8)
a.set_xlabel('Wing tip load, kg')
a.set_ylabel('Wing tip vertical displacement, m')
a.grid()
a.set_xlim(0, 3.5)
a.set_ylim(-0.35, 0.)
a.xaxis.set_tick_params(which='major', direction='in', top='on', width=0.5)
a.xaxis.set_tick_params(which='minor', direction='in', top='on', width=0.5)
a.yaxis.set_tick_params(which='major', direction='in', right='on', width=0.5)
a.yaxis.set_tick_params(which='minor', direction='in', right='on', width=0.5)
for item in ([a.title, a.xaxis.label, a.yaxis.label] +
a.get_xticklabels() + a.get_yticklabels()):
item.set_fontsize(8)
plt.tight_layout()
if SAVE:
plt.savefig(output_figures_folder + '02_Bending.pdf')
print('Skin on')
for k, case in bending_results.items():
if case['skin']:
print('{:20s}\t\t\tslope = {:.2f} cm/kg \t intercept = {:.2f} cm \t 1g deflection = {:.2f}'.format(k, case['linear'].slope * 100, case['linear'].intercept * 100, case['data'][0, 1] * 100))
print('\nSkin off')
for k, case in bending_results.items():
if not case['skin']:
print('{:20s}\t\t\tslope = {:.2f} cm/kg \t intercept = {:.2f} cm \t 1g deflection = {:.2f}'.format(k, case['linear'].slope * 100, case['linear'].intercept * 100, case['data'][0, 1] * 100))
```
# Slope at each point
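Before plotting, a quick sanity check of the `instantaneous_slope` helper defined above (this cell and its made-up data are not part of the original analysis): at the interior points of an exact quadratic, the quadratic fit should recover the analytical gradient exactly.
```
x_check = np.linspace(0, 3.5, 8)
y_check = -0.1 * x_check ** 2 - 0.05 * x_check       # made-up quadratic "displacement" curve
print(instantaneous_slope(y_check, x_check)[1:-1])   # interior points
print(-0.2 * x_check[1:-1] - 0.05)                   # analytical gradient, should match
```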
```
cm2in = 1/2.54
ar = 1.57
ar = 3
width_cm = 20
figsize = (width_cm * cm2in, width_cm / ar * cm2in)
fig, ax = plt.subplots(ncols=2, figsize=figsize)
for case in bending_results.values():
if case['skin']:
a = ax[0]
a.set_title('Skin on')
else:
a = ax[1]
a.set_title('Skin off')
if case['marker'] == '+':
mfc = 'k'
else:
mfc = 'none'
if case['label'] == 'Experimental':
continue
slope = instantaneous_slope(case['data'][:, 1], case['data'][:, 0])
a.plot(case['data'][:, 0], 100 * slope, label=case['label'], marker=case['marker'], ms=case['s'], markeredgecolor='k', mfc=mfc, ls='none',
lw=0.5, color='k', markeredgewidth=0.7,
**case.get('linestyle', {}))
for a in ax:
a.legend(fontsize=8)
a.set_xlabel('Wing tip load, kg')
a.set_ylabel('Gradient, cm/kg')
a.grid()
a.set_xlim(0, 3.5)
a.set_ylim(-14, -4)
for item in ([a.title, a.xaxis.label, a.yaxis.label] +
a.get_xticklabels() + a.get_yticklabels()):
item.set_fontsize(8)
plt.tight_layout()
if SAVE:
plt.savefig(output_figures_folder + '02_Bending_Slope.pdf')
```
# `concurrent.futures` Executor
```
%run -m literary.notebook
import time
import asyncio
import asyncio.futures
import weakref
from concurrent import futures
from .executor import AsyncExecutor
from .futures import chain_future_exception, chain_future_handle, create_future
```
The `ConcurrentFuturesExecutor` accepts a `futures.Executor` object:
```
class ConcurrentFuturesExecutor(AsyncExecutor):
def __init__(self, executor: futures.Executor):
self._executor = executor
```
Because `futures.Executor` instances act as context managers, we should implement the same interface.
```
@patch(ConcurrentFuturesExecutor)
def __enter__(self):
self._executor.__enter__()
return self
@patch(ConcurrentFuturesExecutor)
def __exit__(self, exc_type, exc_val, exc_tb):
self._executor.__exit__(exc_type, exc_val, exc_tb)
return
```
To submit a task, we launch an `_apply_async` task which dispatches to the `futures.Executor` and chains the resulting `futures.Future` to the `aio_cf_future` object. We also chain the exceptions of the task to the result in order to propagate errors and cancellations.
```
@patch(ConcurrentFuturesExecutor)
def _apply(self, func, /, *args, **kwargs) -> asyncio.Future:
aio_cf_future = create_future()
# Because the unwrap stage actually needs to wait for results,
# we create a task to do this work
task = asyncio.create_task(self._apply_async(func, aio_cf_future, *args, **kwargs))
# Allow task to be cancelled or raise exceptions
chain_future_exception(task, aio_cf_future)
return aio_cf_future
```
As the `futures.Executor` requires handles to be resolved on the client, any handles provided as arguments must be resolved before dispatching to the executor. In this `_apply_async` method, we invoke the `futures.Executor.submit` method and chain the resulting `futures.Future` object with the given `asyncio.Future` handle, which holds the status of the running task.
```
@patch(ConcurrentFuturesExecutor)
async def _apply_async(self, func, aio_cf_fut: asyncio.Future, /, *args, **kwargs):
args, kwargs = await self._process_args(args, kwargs)
# Launch task into pool
cf_fut = self._executor.submit(func, *args, **kwargs)
# When we have the concurrent.futures.Future object,
# chain it with the "proxy" fut. We can do this
# because the data have to be retrieved locally anyway.
asyncio.futures._chain_future(cf_fut, aio_cf_fut)
```
The `futures.Executor.submit` method cannot make use of `futures.Future` arguments. Instead, we have to resolve them to their computed values by waiting on them first:
```
@patch(ConcurrentFuturesExecutor)
async def _process_args(self, args, kwargs):
# Unwrap any wrapped handles
args = [await self._unwrap_and_wait_maybe(x) for x in args]
kwargs = {k: await self._unwrap_and_wait_maybe(v) for k, v in kwargs.items()}
return args, kwargs
```
Here, we unwrap the `asyncio.Future` handles in the arguments, and await their results so that they can be passed to `futures.Executor.submit`.
```
@patch(ConcurrentFuturesExecutor)
async def _unwrap_and_wait_maybe(self, obj):
# Ensure is wrapped
try:
self._unwrap_handle(obj)
except ValueError:
return obj
return await self.retrieve(obj)
```
As the future returned by `_apply` is an `asyncio.Future` object, we can use the `chain_future_handle` helper to register the necessary callbacks.
```
@patch(ConcurrentFuturesExecutor)
def _register_handle(self, handle, future):
chain_future_handle(future, handle)
```
To demonstrate this, we can create a process pool executor:
```
pool = futures.ProcessPoolExecutor()
```
Using this pool we can create an executor:
```
executor = ConcurrentFuturesExecutor(pool)
```
To do some work, let's implement a sleep function that returns the delay:
```
def slow_function(timeout):
time.sleep(timeout)
return timeout
```
Now we can chain a few of these tasks together:
```
a = executor.submit(slow_function, 2)
b = executor.submit(slow_function, 5)
c = executor.submit(int.__add__, a, b)
```
We can wait for the result without retrieving its value:
```
await c
```
And when we're ready for the value, we invoke `executor.retrieve`.
```
assert await executor.retrieve(c) == 7
```
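Since we implemented `__enter__` and `__exit__` above, the executor can also manage the pool as a context manager. Below is a hypothetical usage sketch (it creates a separate thread pool purely for illustration); the wrapped pool is shut down when the block exits:
```
# Sketch only: assumes a running event loop (e.g. a notebook cell).
with ConcurrentFuturesExecutor(futures.ThreadPoolExecutor(max_workers=2)) as ex:
    handle = ex.submit(pow, 2, 8)
    print(await ex.retrieve(handle))  # 256
```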
```
#definition of functions
import os
import glob
import SimpleITK as sitk
from skimage import exposure
from skimage import filters
from scipy.ndimage import morphology
from skimage import measure
from PIL import Image
import numpy as np
import pydicom
import tensorflow as tf
from keras import backend as K
from keras.backend import tensorflow_backend
from keras.models import load_model
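# bgd_masking: threshold the slice, clean the mask with basic morphology, keep only
# the largest connected component and set everything outside it to the image minimum,
# so that non-tissue background is suppressed before intensity rescaling.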
def bgd_masking(img,thres=-300): #mask non-tissue area
erode=morphology.binary_closing(morphology.binary_dilation(morphology.binary_erosion(img > thres)))
mask=morphology.binary_fill_holes(erode)
blobs_labels = measure.label(mask, background=0)
blob_hist, blob_bins_center = exposure.histogram(blobs_labels)
first=np.argmax(blob_hist[1:])+1;
maskimg=np.copy(img)
maskimg[blobs_labels!=first]=np.min(maskimg)
return maskimg
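# dcmtonumpy3d: read every N-th DICOM slice so the effective slice spacing is ~5 mm,
# convert stored values with RescaleSlope/Intercept, clip to [minimum, maximum] and
# map to 0-255, resample in-plane to ~2 mm pixels, then center-crop/zero-pad each
# slice to new_y x new_x and stack the slices into a 3D array.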
def dcmtonumpy3d(dcm_list,minimum=-160,maximum=240,new_y=160,new_x=160): #convert dcm to numpy (5mm slice)
final_array = np.empty((0,new_y,new_x))
dcm_ex = pydicom.dcmread(dcm_list[0])
thickness = dcm_ex[0x0018, 0x0050].value
interval = int(np.ceil(5.0 / thickness))
slicenum_list = np.arange(0,len(dcm_list),interval)
for slicenum in slicenum_list:
dcm = pydicom.dcmread(dcm_list[slicenum])
slice_array = dcm.pixel_array
slice_array = bgd_masking(slice_array)
rescaleslope = dcm.RescaleSlope
rescaleintercept = dcm.RescaleIntercept
slice_array = slice_array * rescaleslope + rescaleintercept
slice_array = np.clip(slice_array,minimum,maximum)
slice_array = np.round((slice_array - minimum) * 255 / (maximum - minimum)).astype("int16")
spacing = dcm[0x0028, 0x0030].value
space = float(spacing[0])
res = int(256 * space) #spacing 2mm
pil_data = Image.fromarray(slice_array)
pil_resize = pil_data.resize(size=(res,res))
slice_array = np.array(pil_resize)
slice_y = slice_array.shape[0]
slice_x = slice_array.shape[1]
new_array = np.zeros((new_y,new_x))
if slice_y >= new_y:
crop_y = int(((slice_y - new_y) / 2))
slice_array = slice_array[crop_y:crop_y+new_y,:]
start_y = 0
end_y = start_y + new_y
else:
start_y = int(np.floor((new_y - slice_y) / 2))
end_y = start_y + slice_y
if slice_x >= new_x:
crop_x = int(np.floor((slice_x - new_x) / 2))
slice_array = slice_array[:,crop_x:crop_x+new_x]
start_x = 0
end_x = start_x + new_x
else:
start_x = int(np.floor((new_x - slice_x) / 2))
end_x = start_x + slice_x
new_array[start_y:end_y,start_x:end_x] = slice_array
new_array = new_array.reshape(1,new_y,new_x)
final_array = np.concatenate([final_array, new_array])
return final_array
def center_extraction(image): #estimation of the central point of the object
shape_z = image.shape[0]
shape_y = image.shape[1]
shape_x = image.shape[2]
outlist = []
for i in range(shape_z):
outsum = np.sum(image[i])
outlist.append(outsum)
center_z = np.argmax(outlist)
image_cz = image[center_z]
ant_edge_list = []
pos_edge_list = []
y_flag = 0
for j in range(shape_y): #large_y
y_sum = np.sum(image_cz[j])
if y_sum >= 2 and y_flag == 0:
ant_edge_list.append(j)
y_flag = 1
elif y_sum < 2 and y_flag == 1:
pos_edge_list.append(j)
y_flag = 0
if j == (shape_y - 1):
y_flag = 0
if len(ant_edge_list) == len(pos_edge_list) + 1:
pos_edge_list.append(shape_y - 1)
ant_edge_list = np.array(ant_edge_list)
pos_edge_list = np.array(pos_edge_list)
try:
ydif = pos_edge_list - ant_edge_list
center_y = int((ant_edge_list[np.argmax(ydif)] + pos_edge_list[np.argmax(ydif)]) / 2)
right_edge_list = []
left_edge_list = []
x_flag = 0
for k in range(shape_x): #half-largex
if image_cz[center_y][k] >= 0.5 and x_flag == 0:
right_edge_list.append(k)
x_flag = 1
elif image_cz[center_y][k] < 0.5 and x_flag == 1:
left_edge_list.append(k)
x_flag = 0
if k == (shape_x - 1):
x_flag = 0
if len(right_edge_list) == len(left_edge_list) + 1:
left_edge_list.append(shape_x - 1)
right_edge_list = np.array(right_edge_list)
left_edge_list = np.array(left_edge_list)
xdif = left_edge_list - right_edge_list
center_x = int((right_edge_list[np.argmax(xdif)] + left_edge_list[np.argmax(xdif)]) / 2)
return center_z, center_y, center_x
except ValueError:
return None, None, None
def crop3dimage(image,center_z,center_y,center_x,z_length,y_length,x_length):
start_z = np.minimum(np.maximum(0, int(center_z - (z_length / 2))), int(image.shape[0] - z_length))
start_y = np.minimum(np.maximum(0, int(center_y - (y_length / 2))), int(image.shape[1] - y_length))
start_x = np.minimum(np.maximum(0, int(center_x - (x_length / 2))), int(image.shape[2] - x_length))
croppedimage = image[start_z:start_z+z_length,start_y:start_y+y_length,start_x:start_x+x_length]
return croppedimage
def bbox_edge_2d(array,image, width=12): #calculate the edge of the bounding box
shape_z = image.shape[0]
shape_y = image.shape[1]
shape_x = image.shape[2]
outlist = []
up_edge_list = []
down_edge_list = []
ant_edge_list = []
pos_edge_list = []
right_edge_list = []
left_edge_list = []
z_flag = 0
for i in range(shape_z):
outsum = np.sum(image[i])
outlist.append(outsum)
if outsum >= 2 and z_flag == 0:
up_edge_list.append(i)
z_flag = 1
elif outsum < 2 and z_flag == 1:
down_edge_list.append(i)
z_flag = 0
if i == (shape_z - 1):
z_flag = 0
if len(up_edge_list) == len(down_edge_list) + 1:
down_edge_list.append(shape_z - 1)
center_z = np.argmax(outlist)
image_cz = image[center_z]
y_flag = 0
for j in range(shape_y):
y_sum = np.sum(image_cz[j])
if y_sum >= 1 and y_flag == 0:
ant_edge_list.append(j)
y_flag = 1
elif y_sum < 1 and y_flag == 1:
pos_edge_list.append(j)
y_flag = 0
if j == (shape_y - 1):
y_flag = 0
if len(ant_edge_list) == len(pos_edge_list) + 1:
pos_edge_list.append(shape_y - 1)
ant_edge_list = np.array(ant_edge_list)
pos_edge_list = np.array(pos_edge_list)
ydif = pos_edge_list - ant_edge_list
ant_edge = ant_edge_list[np.argmax(ydif)]
pos_edge = pos_edge_list[np.argmax(ydif)]
length_y = pos_edge - ant_edge
center_y = int((ant_edge + pos_edge) / 2)
x_flag = 0
for k in range(shape_x):
if image_cz[center_y][k] >= 0.5 and x_flag == 0:
right_edge_list.append(k)
x_flag = 1
elif image_cz[center_y][k] < 0.5 and x_flag == 1:
left_edge_list.append(k)
x_flag = 0
if k == (shape_x - 1):
x_flag = 0
if len(right_edge_list) == len(left_edge_list) + 1:
left_edge_list.append(shape_x - 1)
right_edge_list = np.array(right_edge_list)
left_edge_list = np.array(left_edge_list)
try:
xdif = left_edge_list - right_edge_list
right_edge = right_edge_list[np.argmax(xdif)]
left_edge = left_edge_list[np.argmax(xdif)]
length_x = left_edge - right_edge
center_x = int((right_edge + left_edge) / 2)
length = np.maximum(length_x, length_y)+ width
final_right = np.maximum(center_x - int(length / 2), 0)
final_left = np.minimum(center_x + int(length / 2), (shape_x - 1))
final_ant = np.maximum(center_y - int(length / 2), 0)
final_pos = np.minimum(center_y + int(length / 2), (shape_y - 1))
return center_z, final_ant, final_pos, final_right, final_left
except ValueError:
return None, None, None, None, None
def minus_dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (-2. * intersection + K.epsilon()) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())
def loss(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
bce = tf.keras.losses.BinaryCrossentropy()
return bce(y_true_f, y_pred_f) + 1 + minus_dice_coef(y_true, y_pred)
print("Definition finished")
#parameter
#path of RCC cases(directory list)
cases = os.listdir("cases_example/")
#path of directory for saving images of extracted RCC
save_path = "save_example/"
#GPU
os.environ["CUDA_VISIBLE_DEVICES"]="2"
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)
tensorflow_backend.set_session(session)
#threshold
kidney_threshold = 1000 #threshold of kidney-segmented pixel count
RCC_threshold = 50 #threshold of RCC-segmented voxel count
#model
kidney_model = load_model("models/kidney_segmentation.h5", custom_objects={"minus_dice_coef":minus_dice_coef})
smallRCC_model = load_model("models/RCC_segmentation.h5",custom_objects={"loss":loss, "minus_dice_coef":minus_dice_coef})
print("Declaration is completed.")
#model application
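# Pipeline per case: load the DICOM series into a (Z,160,160) volume, run the 2D
# kidney segmentation slice by slice, split the prediction into right/left halves,
# crop a 40x64x64 sub-volume around each detected kidney, run the 3D RCC
# segmentation, and if enough voxels are flagged draw a bounding box on the central
# slice and save it as a PNG.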
for case in cases:
print(case)
dicom_list = glob.glob("cases_example/" + case + "/*.dcm")
dicom_list.sort()
target = dcmtonumpy3d(dicom_list)
z_shape = target.shape[0]
if z_shape < 40: #zero padding if the number of slices is < 40
new_target = np.zeros((40,160,160))
s = int((40- target.shape[0]) / 2)
new_target[s:s+z_shape] = target
target = new_target
right_kidney_seg = np.zeros((target.shape[0],target.shape[1],int(target.shape[2] / 2)))
left_kidney_seg = np.zeros((target.shape[0],target.shape[1],int(target.shape[2] / 2)))
for i in range(target.shape[0]):
image = (target[i] / 255).astype("float32")
img_input = image.reshape(-1,target.shape[1],target.shape[2],1)
kid = kidney_model.predict(img_input)
kid = kid.reshape(target.shape[1],target.shape[2])
k_r = kid[:,:int(target.shape[2] / 2)]
k_l = kid[:,int(target.shape[2] / 2):]
right_kidney_seg[i] = k_r
left_kidney_seg[i] = k_l
# right RCC
if np.sum(right_kidney_seg) <= kidney_threshold:
print("Right Kidney Undetected")
else:
center_rz,center_ry,center_rx = center_extraction(right_kidney_seg)
if center_rz is not None:
right_cropped = crop3dimage(target[:,:,:int(target.shape[2] / 2)],center_rz,center_ry,center_rx,40,64,64)
ckr = (right_cropped / 255).astype("float32")
ckr_input = ckr.reshape(-1,40,64,64,1)
rccseg_r = smallRCC_model.predict(ckr_input)
if np.sum(rccseg_r) > RCC_threshold:
rccseg_r = rccseg_r.reshape(40,64,64)
r_center, r_ant, r_pos, r_right, r_left = bbox_edge_2d(right_cropped,rccseg_r,12)
if r_center is not None:
slice_r = right_cropped[r_center]
slice_r = slice_r[..., np.newaxis].astype("int16")
img_colr = np.concatenate([slice_r,slice_r,slice_r],axis=-1)
img_colr[r_ant,r_right:r_left,1:3] = 0
img_colr[r_pos,r_right:r_left,1:3] = 0
img_colr[r_ant:r_pos,r_right,1:3] = 0
img_colr[r_ant:r_pos+1,r_left,1:3] = 0
img_colr[r_ant,r_right:r_left,0] = 255
img_colr[r_pos,r_right:r_left,0] = 255
img_colr[r_ant:r_pos,r_right,0] = 255
img_colr[r_ant:r_pos+1,r_left,0] = 255
img_colr = img_colr.astype("uint8")
imgr = Image.fromarray(img_colr)
imgr.save(save_path + case + "_r.png") #save right RCC-suspected lesion as PNG file
print("Right RCC-susp lesion detected")
else:
print("Right RCC-susp lesion extraction failed")
else:
print("No right RCC")
else:
print("Right kidney extraction failed")
    #left RCC
if np.sum(left_kidney_seg) <= kidney_threshold:
print("Left kidney undetected")
else:
center_lz,center_ly,center_lx = center_extraction(left_kidney_seg)
if center_lz is not None:
left_cropped = crop3dimage(target[:,:,int(target.shape[2] / 2):],center_lz,center_ly,center_lx,40,64,64)
ckl = (left_cropped / 255).astype("float32")
ckl_input = ckl.reshape(-1,40,64,64,1)
rccseg_l = smallRCC_model.predict(ckl_input)
if np.sum(rccseg_l) > RCC_threshold:
rccseg_l = rccseg_l.reshape(40,64,64)
l_center, l_ant, l_pos, l_right, l_left = bbox_edge_2d(left_cropped,rccseg_l,12)
if l_center is not None:
slice_l = left_cropped[l_center]
slice_l = slice_l[..., np.newaxis].astype("int16")
img_coll = np.concatenate([slice_l,slice_l,slice_l],axis=-1)
img_coll[l_ant,l_right:l_left,1:3] = 0
img_coll[l_pos,l_right:l_left,1:3] = 0
img_coll[l_ant:l_pos,l_right,1:3] = 0
img_coll[l_ant:l_pos+1,l_left,1:3] = 0
img_coll[l_ant,l_right:l_left,0] = 255
img_coll[l_pos,l_right:l_left,0] = 255
img_coll[l_ant:l_pos,l_right,0] = 255
img_coll[l_ant:l_pos+1,l_left,0] = 255
img_coll = img_coll.astype("uint8")
imgl = Image.fromarray(img_coll)
imgl.save(save_path + case + "_l.png") #save left RCC-suspected lesion as PNG file
print("Left RCC-susp lesion detected")
else:
print("Left RCC-susp lesion extraction failed")
else:
print("No left RCC")
else:
print("Left kidney extraction failed")
print("")
```
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import model_from_json
import importlib
import plotting
from sklearn.metrics import accuracy_score
importlib.reload(plotting)
data_loc = '/gpfs/slac/atlas/fs1/d/rafaeltl/public/ML/L1RNN/datasets_2020_ff/'
file_str = 'Jan06_FlavFix_smear_1_std_xtd_zst.h5'
f5 = h5py.File(data_loc+file_str, 'r')
x_test = np.array( f5['x_test'] )
y_test = to_categorical ( np.array( f5['y_test'] ) )
# model_name = 'rnn_LSTM_50_10_nomask_LSTMKIvs_KRl20.0001'
# model_name = 'rnn_GRU_50_10_nomask_LSTMKIvs_KRl20.0001'
# model_name = 'rnn_LSTM_50_10_nomask_LSTMKIvs_KRl10.001l20.0001'
model_name = 'rnn_LSTM_64_10_nomask_KIvs_KRl20.0001'
arch_json = open('keras/model_'+model_name+'_arch.json', 'r').read()
model = model_from_json(arch_json)
model.load_weights(f'keras/model_{model_name}_weights.h5')
y_keras = model.predict(x_test, batch_size=2**10)
print("Accuracy: {}".format(accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_keras, axis=1))))
plt.figure(figsize=(4,4))
_ = plotting.makeRoc(y_test, y_keras)
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
x_train = np.array( f5['x_train'] )
y_train = to_categorical ( np.array( f5['y_train'] ) )
val_split = 0.1
num_train = y_train.shape[0] * (1 - val_split)
batch_size = 2**14
epochs = 150
end_step = np.ceil(num_train / batch_size).astype(np.int32) * epochs
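# end_step = (training batches per epoch) * epochs, so the pruning schedule below
# ramps the sparsity target from 50% to 80% over the whole fine-tuning run.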
# Define model for pruning.
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,
final_sparsity=0.80,
begin_step=0,
end_step=end_step)
}
model_for_pruning = prune_low_magnitude(model, **pruning_params)
import tensorflow as tf
model_for_pruning.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
model_for_pruning.summary()
import tempfile
logdir = tempfile.mkdtemp()
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]
model_for_pruning.fit(x_train, y_train,
batch_size=batch_size, epochs=epochs, validation_split=val_split,
callbacks=callbacks)
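# strip_pruning removes the pruning wrappers and returns a plain Keras model whose
# weights simply contain the zeros introduced during pruning.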
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
model_for_export.summary()
y_keras_pruned = model_for_export.predict(x_test, batch_size=2**10)
print("Accuracy: {}".format(accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_keras_pruned, axis=1))))
ppb_b = y_keras_pruned[:,0] [y_test[:,0] == 1]
ppc_b = y_keras_pruned[:,1] [y_test[:,0] == 1]
ppl_b = y_keras_pruned[:,2] [y_test[:,0] == 1]
ppc_c = y_keras_pruned[:,1] [y_test[:,1] == 1]
ppb_c = y_keras_pruned[:,0] [y_test[:,1] == 1]
ppl_l = y_keras_pruned[:,2] [y_test[:,2] == 1]
ppb_l = y_keras_pruned[:,0] [y_test[:,2] == 1]
plt.figure()
plt.hist( ppb_b/(ppb_b+ppl_b), range=(0,1), bins=10000, histtype='step' )
plt.hist( ppb_l/(ppb_l+ppl_l), range=(0,1), bins=10000, histtype='step' )
plt.yscale('log')
plt.show()
plt.figure()
plt.hist( ppb_b/(ppb_b+ppc_b), range=(0,1), bins=10000, histtype='step' )
plt.hist( ppb_c/(ppb_c+ppc_c), range=(0,1), bins=10000, histtype='step' )
plt.yscale('log')
plt.show()
fig, ax = plt.subplots(figsize=(9, 9))
_ = plotting.makeRoc(y_test, y_keras)
plt.gca().set_prop_cycle(None) # reset the colors
_ = plotting.makeRoc(y_test, y_keras_pruned, linestyle='--')
from matplotlib.lines import Line2D
lines = [Line2D([0], [0], ls='-'),
Line2D([0], [0], ls='--')]
from matplotlib.legend import Legend
leg = Legend(ax, lines, labels=['keras', 'pruned'],
loc='lower right', frameon=False)
ax.add_artist(leg)
pruned_model_json = model_for_export.to_json()
with open(f'keras/pruned_model_{model_name}_arch.json', "w") as json_file:
json_file.write(pruned_model_json)
model_for_export.save_weights(f'keras/pruned_model_{model_name}_weights.h5')
```
<img src='http://pycircle.org/static/pycircle_big.png' style="margin-left:auto; margin-right:auto; height:70%; width:70%">
# Introduction, part 2
```
help([1, 2, 3])
dir([1, 2, 3])
sum??
```
# Built-in functions
```
all([1==1, True, 10, -1, False, 3*5==1]), all([1==5, True, 10, -1])
any([False, True]), any([False, False])
bin(12), oct(12), hex(12), int('12'), float(12.)
ord('A'), chr(65)
raw_input(u"Podaj liczbę: ")
zip([1,2,3, 3], [2, 3, 4, 10])
sorted([8, 3, 12, 9, 3]), reversed(range(10)), list(reversed(range(10)))
len([3, 2, 1]), len([[1, 2], [3, 4, 5]])
list(), dict(), set(), tuple()
```
## Tuple
```
A = (1, 2, 3)
B = [1, 2, 3]
A == B
```
How does a tuple differ from a list?
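The key difference is mutability: a list can be modified in place, while a tuple cannot. A small sketch:
```
A = (1, 2, 3)
B = [1, 2, 3]
B[0] = 10      # works, lists are mutable
try:
    A[0] = 10  # tuples are immutable
except TypeError as e:
    print(e)
```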
## Set
```
A = set()
A.add(2)
A.add(3)
A.add(4)
A
A.add(3)
A
B = set((4, 5, 6))
A.difference(B)
A.symmetric_difference(B)
A.intersection(B)
A.union(B)
```
## Simple math
```
pow(2, 10), divmod(10, 3), sum([1, 2, 3])
round(0.5), round(0.2), round(0.9)
min([1, 2, 3]), max([1, 2, 3])
abs(10), abs(-10)
24 % 5, 24 % 2
```
## A bit of functional programming
### map, filter, reduce
### lambda expressions $\lambda$
```
f = lambda x: x+1
f(3)
f = lambda a, b: a+b**3
f(2, 3)
map(lambda x: x+10, [0, 2, 5, 234])
[x+10 for x in [0, 2]]
map(chr, [80, 121, 67, 105, 114, 99, 108, 101])
[chr(x) for x in [80, 121, 67, 105, 114, 99, 108, 101]]
filter(lambda x: x > 0, [-1, 0, 4, -3, 2])
[x for x in [-1, 0, 4, -3, 2] if x > 0]
reduce(lambda a, b: a - b, [2, 3, 4])
2 - 3 - 4
```
More information about built-in functions at https://docs.python.org/2/library/functions.html
# Exercises 1
1 . Write code that builds a list of the numbers in $[0, 100]$ that are divisible by 3 but not by 9
2 . Write code that returns the unique elements of a given list
3 . Write code that finds the maximum value of a dictionary
# Files
```
%ls -l
fp = open("pycircle.txt", "w")
%ls -l
fp.write("Hello world\n")
fp.close()
%cat pycircle.txt
with open("pycircle.txt") as fp:
print fp.read(),
```
# Functions
```
def fun1(a):
a.append(9)
return a
def fun2(a=[]):
a.append(9)
return a
lista1 = [1, 2, 3]
lista2 = [3, 4, 5]
fun1(lista1), fun2(lista2)
def fun2(a=[]):
a.append(9)
return a
fun2()
fun2()
fun2()
```
# LEGB
<img src="http://sandeeps.in/_images/python_legb.png" style="margin-left:auto; margin-right:auto;">
```
def show_local():
x = 23
print("Local: %s" % x)
show_local()
def show_enclosing(a):
def enclosing():
print("Enclosing: %s" % a)
enclosing()
show_enclosing(5)
x = 43
def show_global():
print("Global %s" % x)
show_global()
def show_built():
print("Built-in: %s" % abs)
show_built()
x = 43
def what_x():
print(x)
x = 4
what_x()
x = 43
def encl_x():
x = 23
def enclosing():
print("Enclosing: %s" % x)
enclosing()
encl_x()
x = 43
def what_about_globals():
global x
x = 37
print("In function %s" % x)
what_about_globals()
print("After function %s" % x)
```
# Functions are objects too!
```
def f(x):
f.l += x
print "x: ", x
print "f.l: ", f.l
f.l = 10
f(2)
f(14)
```
# Function factories
```
def powerer(power):
def nested(number):
return number ** power
return nested
f = powerer(3)
f(2), f(10)
def licznik(start):
def nested(label):
print(label, nested.state)
nested.state += 1
nested.state = start
return nested
f = licznik(0)
f('a')
f('b')
f('c')
```
# Exercises 2
1 . Write a function that creates a file with the square roots of the integers in $[0, 100]$, each on a separate line
2 . Write a function that reads the square roots from the file from the previous exercise, computes their sum and appends it to the file
3 . Write a function that works like `''.join()` using `reduce`
```
' '.join(['a', 'b', 'c'])
def my_join(joining_str, list_of_str):
return reduce
my_join(" ", ['a', 'b', 'c'])
' '.join(['a', 'b', 'c'])
```
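A possible solution for exercise 3 using `reduce` (one sketch among many):
```
def my_join(joining_str, list_of_str):
    # insert joining_str between consecutive elements, left to right
    return reduce(lambda a, b: a + joining_str + b, list_of_str)

my_join(" ", ['a', 'b', 'c'])
```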
# Pan-genomes comparison report
```
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn3
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
# inputs
# PG names
pg1_name = "<PG1_NAME>"
pg2_name = "<PG2_NAME>"
# PAV
pg1_pav = "<PG1_PAV>"
pg2_pav = "<PG2_PAV>"
true_pg_pav = "<TRUE_PAV>"
# Non-ref matches TSV
pg1_vs_pg2_matches = "<PG1_VS_PG2_NON_REF_MATCHES>"
pg1_vs_true_matches = "<PG1_VS_TRUE_NON_REF_MATCHES>"
pg2_vs_true_matches = "<PG2_VS_TRUE_NON_REF_MATCHES>"
# read in PAV and matches tables
pg1_pav_df = pd.read_csv(pg1_pav, sep='\t', index_col = 0)
pg2_pav_df = pd.read_csv(pg2_pav, sep='\t', index_col = 0)
# assuming same sample names, adjust order
pg2_pav_df = pg2_pav_df[list(pg1_pav_df.columns)]
pg1_vs_pg2_matches_df = pd.read_csv(pg1_vs_pg2_matches, sep='\t')
# number of samples in PGs
n_samples = pg1_pav_df.shape[1]
# convert PG1 and PG2 to a common naming, based on the PG1-vs-PG2 non-ref matches
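# Renaming scheme: matched non-reference pan-genes ("PanGene*") get a shared name
# (PG2 entries are renamed to their PG1 match), unmatched non-reference pan-genes get
# a "__<PG>_unmatched" suffix, and reference gene names are only sanitized.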
def match_name(row, matches_df, pg_name, other_pg_name, rename):
if row.name.startswith('PanGene'):
if row.name in matches_df[pg_name].values:
if rename:
return matches_df.loc[matches_df[pg_name] == row.name][other_pg_name].iloc[0]
else:
return row.name
else:
return row.name + '__' + pg_name + "_unmatched"
else:
return re.sub(r'[^0-9a-zA-Z\-\._]+','_',row.name)
# create rename series
pg1_rename = pg1_pav_df.apply(match_name, args=(pg1_vs_pg2_matches_df, pg1_name, pg2_name, False), axis=1)
pg2_rename = pg2_pav_df.apply(match_name, args=(pg1_vs_pg2_matches_df, pg2_name, pg1_name, True), axis=1)
# rename
pg1_pav_df.index = pg1_pav_df.index.map(pg1_rename)
pg2_pav_df.index = pg2_pav_df.index.map(pg2_rename)
# calculate pan-gene occupancies
pg1_occupancy = pg1_pav_df.sum(axis=1)
pg1_occupancy = pg1_occupancy.loc[pg1_occupancy > 0]
pg2_occupancy = pg2_pav_df.sum(axis=1)
pg2_occupancy = pg2_occupancy.loc[pg2_occupancy > 0]
```
## Basic stats
```
def stats_from_pav_df(df):
total_pangenes = df.shape[0]
non_ref_pangenes = df.loc[df.index.str.startswith('PanGene')].shape[0]
ref_pangenes = total_pangenes - non_ref_pangenes
non_ref_unmatched = df.loc[(df.index.str.startswith('PanGene')) & (df.index.str.endswith('_unmatched'))].shape[0]
non_ref_matched = non_ref_pangenes - non_ref_unmatched
n_samples = df.shape[1]
occup = df.sum(axis=1)
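    # occupancy = number of samples in which a pan-gene is present;
    # core = present in all samples, shell = in more than one but not all, singleton = exactly one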
core = (occup == n_samples).sum()
shell = (occup.between(1,n_samples,inclusive=False)).sum()
singletons = (occup == 1).sum()
index = ['Total pan-genes', 'Reference pan-genes', 'Non-reference pan-genes',
'Matched non-reference pan-genes', 'Unmatched non-reference pan-genes',
'Core pan-genes', 'Shell pan-genes', 'Singletons']
values = [total_pangenes, ref_pangenes, non_ref_pangenes, non_ref_matched, non_ref_unmatched, core, shell, singletons]
return pd.Series(values, index = index)
pg1_stats = stats_from_pav_df(pg1_pav_df)
pg2_stats = stats_from_pav_df(pg2_pav_df)
stats_df = pd.concat([pg1_stats,pg2_stats], axis=1)
stats_df.columns = [pg1_name, pg2_name]
stats_df
# plot overlap of non-ref genes
pg1_nonref_genes = set(pg1_pav_df.loc[pg1_pav_df.index.str.startswith('PanGene')].index)
pg2_nonref_genes = set(pg2_pav_df.loc[pg2_pav_df.index.str.startswith('PanGene')].index)
venn2([pg1_nonref_genes, pg2_nonref_genes], set_labels=[pg1_name,pg2_name])
plt.title('Overlap of non-reference genes')
plt.show()
# plot occupancy distributions
pg1_occup_counts = pg1_occupancy.value_counts().sort_index()
pg2_occup_counts = pg2_occupancy.value_counts().sort_index()
x = pg1_occup_counts.index
fig = go.Figure(data=[
go.Bar(name=pg1_name, x=x, y=pg1_occup_counts),
go.Bar(name=pg2_name, x=x, y=pg2_occup_counts)]
)
# Change the bar mode
fig.update_layout(barmode='group', title='Occupancy histogram', xaxis_title="Occupancy", yaxis_title="# of pan-genes")
fig.show()
# plot number of genes per accession
pg1_genes_per_acc = pg1_pav_df.sum()
pg2_genes_per_acc = pg2_pav_df.sum()
x = pg1_genes_per_acc.index
fig = go.Figure(data=[
go.Bar(name=pg1_name, x=x, y=pg1_genes_per_acc),
go.Bar(name=pg2_name, x=x, y=pg2_genes_per_acc),
])
# Change the bar mode
fig.update_layout(barmode='group', title='Pan-genes per accession', xaxis_title="Accession", yaxis_title="# of pan-genes")
fig.show()
```
## Discrepancies between pan-genomes
```
# Add unmatched pan-genes from each PG to the other PG (as absent in all samples)
# this ensures both PGs have the same set of genes
pg1_unmatched_df = pg1_pav_df.loc[~pg1_pav_df.index.isin(pg2_pav_df.index)]
for col in pg1_unmatched_df.columns:
pg1_unmatched_df[col].values[:] = 0
pg2_unmatched_df = pg2_pav_df.loc[~pg2_pav_df.index.isin(pg1_pav_df.index)]
for col in pg2_unmatched_df.columns:
pg2_unmatched_df[col].values[:] = 0
pg1_pav_df_plus_pg2_unmatched = pg1_pav_df.append(pg2_unmatched_df)
pg2_pav_df_plus_pg1_unmatched = pg2_pav_df.append(pg1_unmatched_df)
# sort columns and gene names in both DFs, so the order is identical
accessions = list(pg1_pav_df_plus_pg2_unmatched.columns.sort_values())
pg1_pav_df_plus_pg2_unmatched = pg1_pav_df_plus_pg2_unmatched[accessions].sort_index()
pg2_pav_df_plus_pg1_unmatched = pg2_pav_df_plus_pg1_unmatched[accessions].sort_index()
# find discrepancies
pav_diff = (pg1_pav_df_plus_pg2_unmatched - pg2_pav_df_plus_pg1_unmatched)
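# Sign convention: +1 = present in PG1 but absent in PG2, -1 = present in PG2 but
# absent in PG1, 0 = the two pan-genomes agree for that gene/sample cell.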
pg1_raname_df = pd.DataFrame(pg1_rename).reset_index()
pg1_raname_df.columns = [pg1_name + '_orig_name', 'new_name']
pg2_raname_df = pd.DataFrame(pg2_rename).reset_index()
pg2_raname_df.columns = [pg2_name + '_orig_name', 'new_name']
# create discrepancies table
discrep_df = pav_diff.reset_index().melt(id_vars='gene', value_vars=pav_diff.columns)
discrep_df.columns = ['gene','sample','type']
discrep_df = discrep_df.loc[discrep_df['type'] != 0]
# add original gene names
discrep_df = discrep_df.merge(pg1_raname_df, how='left', left_on='gene', right_on='new_name')
discrep_df = discrep_df.merge(pg2_raname_df, how='left', left_on='gene', right_on='new_name')
discrep_df = discrep_df[['gene', pg1_name + '_orig_name', pg2_name + '_orig_name', 'sample', 'type']]
# print to file
discrep_df.to_csv('discrepancies.tsv', sep='\t', index=False)
# calculate stats (separate by ref vs. non-ref)
total_cells = pav_diff.count().sum()
total_discrep = (pav_diff != 0).astype(int).sum(axis=1).sum()
in_pg1_not_in_pg2 = (pav_diff == 1).astype(int).sum(axis=1).sum()
in_pg2_not_in_pg1 = (pav_diff == -1).astype(int).sum(axis=1).sum()
pav_diff_ref = pav_diff.loc[~(pav_diff.index.str.startswith('PanGene'))]
pav_diff_nonref = pav_diff.loc[pav_diff.index.str.startswith('PanGene')]
total_ref_cells = pav_diff_ref.count().sum()
total_nonref_cells = pav_diff_nonref.count().sum()
ref_discrep = (pav_diff_ref != 0).astype(int).sum(axis=1).sum()
ref_in_pg1_not_in_pg2 = (pav_diff_ref == 1).astype(int).sum(axis=1).sum()
ref_in_pg2_not_in_pg1 = (pav_diff_ref == -1).astype(int).sum(axis=1).sum()
nonref_discrep = (pav_diff_nonref != 0).astype(int).sum(axis=1).sum()
nonref_in_pg1_not_in_pg2 = (pav_diff_nonref == 1).astype(int).sum(axis=1).sum()
nonref_in_pg2_not_in_pg1 = (pav_diff_nonref == -1).astype(int).sum(axis=1).sum()
# create discrepancies stats table
ind = ['All', 'Ref', 'Non-ref']
cells = [total_cells, total_ref_cells, total_nonref_cells]
discrep = [total_discrep, ref_discrep, nonref_discrep]
pres_in_pg1_abs_in_pg2 = [in_pg1_not_in_pg2, ref_in_pg1_not_in_pg2, nonref_in_pg1_not_in_pg2]
pres_in_pg2_abs_in_pg1 = [in_pg2_not_in_pg1, ref_in_pg2_not_in_pg1, nonref_in_pg2_not_in_pg1]
discrep_stats_df = pd.DataFrame({'Cells': cells,
"Total discrepancies": discrep,
"P in %s and A in %s" %(pg1_name,pg2_name) : pres_in_pg1_abs_in_pg2,
"P in %s and A in %s" %(pg2_name,pg1_name) : pres_in_pg2_abs_in_pg1},
index = ind)
discrep_stats_df
# discrepancies per gene
discrep_per_gene = pav_diff.apply(lambda row: abs(row).sum(), axis=1)
fig = px.histogram(discrep_per_gene, title="Histogram of discrepancies per pan-gene",
labels={'value': '# of discrepancies'})
fig.show()
# discrepancies per gene - non-ref only
discrep_per_nonref_gene = pav_diff_nonref.apply(lambda row: abs(row).sum(), axis=1)
fig = px.histogram(discrep_per_nonref_gene, title="Histogram of discrepancies per non-ref pan-gene",
labels={'value': '# of discrepancies'})
fig.show()
# occupancy diff
pg1_pav_df_plus_pg2_unmatched['occupancy'] = pg1_pav_df_plus_pg2_unmatched.sum(axis=1)
pg2_pav_df_plus_pg1_unmatched['occupancy'] = pg2_pav_df_plus_pg1_unmatched.sum(axis=1)
occup_diff = pg1_pav_df_plus_pg2_unmatched['occupancy'] - pg2_pav_df_plus_pg1_unmatched['occupancy']
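# positive values = higher occupancy in PG1, negative values = higher occupancy in PG2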
fig = px.histogram(occup_diff, title="Histogram of occupancy differences",
labels={'value': 'Occupancy difference'})
fig.show()
# occupancy diff - non-ref only
occup_diff_nonref = pg1_pav_df_plus_pg2_unmatched.loc[pav_diff.index.str.startswith('PanGene')]['occupancy'] - pg2_pav_df_plus_pg1_unmatched.loc[pav_diff.index.str.startswith('PanGene')]['occupancy']
fig = px.histogram(occup_diff_nonref, title="Histogram of occupancy differences of non-reference pan-genes",
labels={'value': 'Occupancy difference'})
fig.show()
# occupancy in PG1 vs. occupancy in PG2
tmp_df = pd.concat([pg1_pav_df_plus_pg2_unmatched['occupancy'], pg2_pav_df_plus_pg1_unmatched['occupancy']], axis=1)
tmp_df['pan-gene'] = tmp_df.index
tmp_df.columns = [pg1_name + ' occupancy', pg2_name + ' occupancy','pan-gene']
tmp_df = tmp_df.groupby([pg1_name + ' occupancy', pg2_name + ' occupancy']).count().unstack(level=0).fillna(0)
tmp_df.columns = tmp_df.columns.droplevel(0)
tmp_df = tmp_df.transpose()
tmp_df.loc[:,0:n_samples] = tmp_df.loc[:,0:n_samples].div(tmp_df.sum(axis=1), axis=0)*100
fig = px.imshow(tmp_df)
fig.show()
# occupancy vs. discrepancies
# use occupancies of true PG
tmp_df = pd.concat([pg1_occupancy, discrep_per_gene], axis=1, join='inner')
tmp_df['pan-gene'] = tmp_df.index
tmp_df.columns = ['occupancy','discrepancies','pan-gene']
tmp_df = tmp_df.groupby(['occupancy', 'discrepancies']).count().unstack(level=0).fillna(0)
tmp_df.columns = tmp_df.columns.droplevel(0)
tmp_df = tmp_df.transpose()
tmp_df.loc[:,0:n_samples] = tmp_df.loc[:,0:n_samples].div(tmp_df.sum(axis=1), axis=0)*100
fig = px.imshow(tmp_df)
fig.show()
```
# Logistic Regression - K-Fold
Logistic Regression is a Machine Learning classification algorithm that is used to predict the probability of a categorical dependent variable. In logistic regression, the dependent variable is a binary variable that contains data coded as 1 (yes, success, etc.) or 0 (no, failure, etc.). In other words, the logistic regression model predicts P(Y=1) as a function of X.
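As a quick illustration of the model form (a minimal sketch, separate from the evaluation pipeline below), the predicted probability is obtained by passing a linear combination of the features through the logistic (sigmoid) function:
```
import numpy as np

def sigmoid(z):
    # logistic function: maps any real number into (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

# P(Y=1 | x) for an illustrative weight vector w and bias b
w, b = np.array([0.8, -0.5]), 0.1
x = np.array([1.2, 0.7])
print(sigmoid(np.dot(w, x) + b))  # a probability between 0 and 1
```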
## Original Dataset
```
# Load Libs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
import ds_functions as ds
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.model_selection import KFold, cross_val_score
N_SPLITS = 10
data: pd.DataFrame = pd.read_csv('../../datasets/heart_failure_clinical_records_dataset.csv')
y: np.ndarray = data.pop('DEATH_EVENT').values
X: np.ndarray = data.values
labels = pd.unique(y)
labels = pd.unique(y)
if(labels[0] == 1):
temp = labels[0]
labels[0] = labels[1]
labels[1] = temp
def kfold_model(X, y, penalty, C, max_iters):
# Compute the Average metrics and CNFMTX for a set of hyper-parameters
kf = KFold(n_splits = N_SPLITS, shuffle=True)
acc = [0, 0]
recall = [0, 0]
specificity = [0, 0]
precision = [0, 0]
matrices = np.zeros((2, 2, N_SPLITS))
h = 0
for train_index, test_index in kf.split(X):
trnX, tstX = X[train_index], X[test_index]
trnY, tstY = y[train_index], y[test_index]
logreg = LogisticRegression(penalty=penalty, C=C, max_iter=max_iters)
logreg.fit(trnX, trnY)
prd_trn = logreg.predict(trnX)
prd_tst = logreg.predict(tstX)
cnf_mtx_trn = metrics.confusion_matrix(trnY, prd_trn, labels)
tn_trn, fp_trn, fn_trn, tp_trn = cnf_mtx_trn.ravel()
cnf_mtx_tst = metrics.confusion_matrix(tstY, prd_tst, labels)
matrices[:,:,h] = cnf_mtx_tst
tn_tst, fp_tst, fn_tst, tp_tst = cnf_mtx_tst.ravel()
acc[0] += (tn_trn + tp_trn) / (tn_trn + tp_trn + fp_trn + fn_trn)
acc[1] += (tn_tst + tp_tst) / (tn_tst + tp_tst + fp_tst + fn_tst)
recall[0] += tp_trn / (tp_trn + fn_trn)
recall[1] += tp_tst / (tp_tst + fn_tst)
specificity[0] += tn_trn / (tn_trn + fp_trn)
specificity[1] += tn_tst / (tn_tst + fp_tst)
precision[0] += tp_trn / (tp_trn + fp_trn)
precision[1] += tp_tst / (tp_tst + fp_tst)
h += 1
acc = np.divide(acc, N_SPLITS)
recall = np.divide(recall, N_SPLITS)
specificity = np.divide(specificity, N_SPLITS)
precision = np.divide(precision, N_SPLITS)
cnf_mtx = np.mean(matrices, axis=2).astype('int64')
evaluation = {'Accuracy': acc,
'Recall': recall,
'Specificity': specificity,
'Precision': precision}
return evaluation, cnf_mtx
def log_reg_analysis(X, y):
max_acc = -1
penalty = ['l2', 'none']
C = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
max_iters = [10, 25, 50, 100, 200]
plt.figure()
fig, axs = plt.subplots(1, 2, figsize=(16, 4), squeeze=False)
for p in penalty:
values = {}
for c in C:
acc_values = []
for max_i in max_iters:
evaluation, cnf_mtx = kfold_model(X, y, p, c, max_i)
acc_values.append(evaluation['Accuracy'][1])
if evaluation['Accuracy'][1] > max_acc:
best_model = (p, c, max_i)
best_metrics = evaluation, cnf_mtx
max_acc = evaluation['Accuracy'][1]
values[c] = acc_values
ds.multiple_line_chart(max_iters, values, ax=axs[0, penalty.index(p)], title='Logistic Regression with %s penalty' % p,
xlabel='Maximum Iterations', ylabel='Accuracy', percentage=True, ymin=0.6, ymax=1)
    # return the metrics of the best hyper-parameter combination, not the last one evaluated
    return best_model, best_metrics[0], best_metrics[1]
def pretty_metrics(evaluation):
print('\tModel Statistics\n===============================')
for k in evaluation.keys():
line_new = '%12s %.2f %.2f' % (k, evaluation[k][0], evaluation[k][1])
print(line_new)
model, evaluation, cnf_mtx = log_reg_analysis(X, y)
fig, axs = plt.subplots(1, 2, figsize=(2 * ds.HEIGHT, ds.HEIGHT))
ds.multiple_bar_chart(['Train', 'Test'], evaluation, ax=axs[0], title="Model's performance over Train and Test sets")
ds.plot_confusion_matrix(cnf_mtx, labels, ax=axs[1])
pretty_metrics(evaluation)
```
<br/>
<br/>
<br/>
<br/>
<br/>
## Scaled (Standardized)
```
data: pd.DataFrame = pd.read_csv('../../datasets/hf_scaled/HF_standardized.csv')
y: np.ndarray = data.pop('DEATH_EVENT').values
X: np.ndarray = data.values
labels = pd.unique(y)
labels = pd.unique(y)
if(labels[0] == 1):
temp = labels[0]
labels[0] = labels[1]
labels[1] = temp
model, evaluation, cnf_mtx = log_reg_analysis(X, y)
fig, axs = plt.subplots(1, 2, figsize=(2 * ds.HEIGHT, ds.HEIGHT))
ds.multiple_bar_chart(['Train', 'Test'], evaluation, ax=axs[0], title="Model's performance over Train and Test sets")
ds.plot_confusion_matrix(cnf_mtx, labels, ax=axs[1])
pretty_metrics(evaluation)
```
<br/>
<br/>
<br/>
<br/>
<br/>
## Scaled & Outliers with Winsorization
```
data: pd.DataFrame = pd.read_csv('../../datasets/hf_outliers/HR_S_outlierWins.csv')
y: np.ndarray = data.pop('DEATH_EVENT').values
X: np.ndarray = data.values
labels = pd.unique(y)
labels = pd.unique(y)
if(labels[0] == 1):
temp = labels[0]
labels[0] = labels[1]
labels[1] = temp
model, evaluation, cnf_mtx = log_reg_analysis(X, y)
fig, axs = plt.subplots(1, 2, figsize=(2 * ds.HEIGHT, ds.HEIGHT))
ds.multiple_bar_chart(['Train', 'Test'], evaluation, ax=axs[0], title="Model's performance over Train and Test sets")
ds.plot_confusion_matrix(cnf_mtx, labels, ax=axs[1])
pretty_metrics(evaluation)
```
<br/>
<br/>
<br/>
<br/>
<br/>
## Trimmed Outliers Dataset (Interquartile)
```
data: pd.DataFrame = pd.read_csv('../../datasets/hf_outliers/HR_S_outlierTrim_IQS.csv')
y: np.ndarray = data.pop('DEATH_EVENT').values
X: np.ndarray = data.values
labels = pd.unique(y)
labels = pd.unique(y)
if(labels[0] == 1):
temp = labels[0]
labels[0] = labels[1]
labels[1] = temp
model, evaluation, cnf_mtx = log_reg_analysis(X, y)
fig, axs = plt.subplots(1, 2, figsize=(2 * ds.HEIGHT, ds.HEIGHT))
ds.multiple_bar_chart(['Train', 'Test'], evaluation, ax=axs[0], title="Model's performance over Train and Test sets")
ds.plot_confusion_matrix(cnf_mtx, labels, ax=axs[1])
pretty_metrics(evaluation)
```
<br/>
<br/>
<br/>
<br/>
<br/>
## Balanced Dataset
```
data: pd.DataFrame = pd.read_csv('../../datasets/hf_balanced/HF_balanced.csv')
y: np.ndarray = data.pop('DEATH_EVENT').values
X: np.ndarray = data.values
labels = pd.unique(y)
labels = pd.unique(y)
if(labels[0] == 1):
temp = labels[0]
labels[0] = labels[1]
labels[1] = temp
model, evaluation, cnf_mtx = log_reg_analysis(X, y)
fig, axs = plt.subplots(1, 2, figsize=(2 * ds.HEIGHT, ds.HEIGHT))
ds.multiple_bar_chart(['Train', 'Test'], evaluation, ax=axs[0], title="Model's performance over Train and Test sets")
ds.plot_confusion_matrix(cnf_mtx, labels, ax=axs[1])
pretty_metrics(evaluation)
def plot_roc_chart(models: dict, X: np.ndarray, y: np.ndarray, ax: plt.Axes = None, target: str = 'class'):
if ax is None:
ax = plt.gca()
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
ax.set_xlabel('FP rate')
ax.set_ylabel('TP rate')
ax.set_title('ROC chart for %s' % target)
ax.plot([0, 1], [0, 1], color='navy', label='random', linewidth=1, linestyle='--', marker='')
for clf in models.keys():
trnX, tstX, trnY, tstY = train_test_split(X, y, train_size=0.7, stratify=y)
lr = LogisticRegression(penalty=models[clf][0], C=models[clf][1], max_iter=models[clf][2])
lr.fit(trnX, trnY)
metrics.plot_roc_curve(lr, tstX, tstY, ax=ax, marker='', linewidth=1)
ax.legend(loc="lower right")
plt.figure()
plot_roc_chart({'Best LogReg Model': model}, X, y, target='class')
plt.show()
```
<br/>
<br/>
<br/>
<br/>
<br/>
## Balanced and Standardized Dataset
```
data: pd.DataFrame = pd.read_csv('../../datasets/HF_balanced_standardized.csv')
y: np.ndarray = data.pop('DEATH_EVENT').values
X: np.ndarray = data.values
labels = pd.unique(y)
labels = pd.unique(y)
if(labels[0] == 1):
temp = labels[0]
labels[0] = labels[1]
labels[1] = temp
model, evaluation, cnf_mtx = log_reg_analysis(X, y)
fig, axs = plt.subplots(1, 2, figsize=(2 * ds.HEIGHT, ds.HEIGHT))
ds.multiple_bar_chart(['Train', 'Test'], evaluation, ax=axs[0], title="Model's performance over Train and Test sets")
ds.plot_confusion_matrix(cnf_mtx, labels, ax=axs[1])
pretty_metrics(evaluation)
plt.figure()
plot_roc_chart({'Best LogReg Model': model}, X, y, target='class')
plt.show()
```
# Setting Up
```
# Import BeautifulSoup
from bs4 import BeautifulSoup
from splinter import Browser
# Set the executable path and initialize the chrome browser
# ----------------------MAC-----------------------------------------
#executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
#browser = Browser('chrome', **executable_path)
# ======================Windows=====================================
executable_path = {'executable_path': './chromedriver.exe'}
browser = Browser('chrome', **executable_path)
```
# Visit the NASA Mars News Site
```
# Visit the NASA Mars news site
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
news_soup = BeautifulSoup(html, 'html.parser')
# print(news_soup)
# slide element: everything in the
# <ul class="item_list">
# <li class="slide">
# ....
# </ul>
slide_element = news_soup.select_one('ul.item_list li.slide')
slide_element.find("div", class_ = "content_title")
# Use the parent element to find the first content title div and save its text as news_title
news_title = slide_element.find('div', class_ = "content_title").get_text()
news_title
news_paragraph = slide_element.find('div', class_ = "article_teaser_body").get_text()
news_paragraph
```
# JPL SPACE IMAGES FEATURED IMAGE
```
# Visit URL
executable_path = {'executable_path': './chromedriver.exe'}
browser = Browser('chrome', **executable_path)
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
# Ask splinter to find the full image button (id 'full_image') and click it
# <button class="full_image">Full Image</button>
full_image_button = browser.find_by_id('full_image')
full_image_button.click()
# Find the more info button and click that
browser.is_element_present_by_text('more info', wait_time = 1)
more_info_element = browser.find_link_by_partial_text('more info')
more_info_element.click()
# Parse the results html with soup
html = browser.html
image_soup = BeautifulSoup(html, 'html.parser')
img_url = image_soup.select_one('figure.lede a img').get('src')
img_url
# Use the base url to create an absolute url
img_url = f'https://www.jpl.nasa.gov{img_url}'
img_url
```
# MARS WEATHER
```
executable_path = {'executable_path': './chromedriver.exe'}
browser = Browser('chrome', **executable_path)
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html = browser.html
weather_soup = BeautifulSoup(html, 'html.parser')
# First find a tweet with the data-name `Mars Weather`
mars_weather_tweet = weather_soup.find('div',
attrs = {
"class": "tweet",
"data-name": "Mars Weather"
})
print(mars_weather_tweet)
# Next, search within the tweet for the p tag containing the tweet text
mars_weather = mars_weather_tweet.find('p', 'tweet-text').get_text()
mars_weather
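# MARS HEMISPHERES
# Scrape each hemisphere's title and sample image URL from the USGS Astrogeology site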
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
hemisphere_image_urls = []
# First get a list of all the hemispheres
links = browser.find_by_css('a.product-item h3')
for item in range(len(links)):
hemisphere = {}
# We have to find the element on each loop to avoid a stale element exception
browser.find_by_css('a.product-item h3')[item].click()
    # Next we find the Sample Image anchor tag and extract the href
sample_element = browser.find_link_by_text('Sample').first
hemisphere['img_url'] = sample_element['href']
    # Get the hemisphere title
hemisphere['title'] = browser.find_by_css('h2.title').text
    # Append the hemisphere dict to the list
hemisphere_image_urls.append(hemisphere)
# Finally, we navigate backwards
browser.back()
hemisphere_image_urls
```
# MARS FACTS
```
import pandas as pd
df = pd.read_html('https://space-facts.com/mars/')[0]
print(df)
df.columns = ['description', 'value']
df.set_index('description', inplace = True)
df
df.to_html()
browser.quit()
```
## The Delegation Run
If classes are objects what is the difference between types and instances?
When I talk about "my cat" I am referring to a concrete instance of the "cat" concept, which is a _subtype_ of "animal". So, although both are objects, types can be _specialized_ while instances cannot.
Usually an object B is said to be a specialization of an object A when:
* B has all the features of A
* B can provide new features
* B can perform some or all the tasks performed by A in a different way
Those targets are very general and valid for any system, and the key to achieving them with maximum reuse of already existing components is _delegation_. Delegation means that an object shall perform only what it knows best, and leave the rest to other objects.
Delegation can be implemented with two different mechanisms: _composition_ and _inheritance_. Sadly, very often only inheritance is listed among the pillars of OOP techniques, forgetting that it is an implementation of the more generic and fundamental mechanism of delegation; perhaps a better nomenclature for the two techniques could be _explicit delegation_ (composition) and _implicit delegation_ (inheritance).
Please note that, again, when talking about composition and inheritance we are focusing on a behavioural or structural form of delegation. Another way to think about the difference between composition and inheritance is to consider whether the object _knows_ who can satisfy your request or whether the object _is_ the one that satisfies the request.
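As a tiny sketch of the two flavours (the class names here are invented for illustration):
```
# Implicit delegation (inheritance): Child *is a* Parent
class Parent:
    def greet(self):
        return "hello"

class Child(Parent):
    pass

# Explicit delegation (composition): Wrapper *knows* a Parent and forwards to it
class Wrapper:
    def __init__(self):
        self.inner = Parent()

    def greet(self):
        return self.inner.greet()

print(Child().greet(), Wrapper().greet())
```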
**Please, please, please do not forget composition**: in many cases, composition can lead to simpler systems, with benefits on maintainability and changeability.
Usually composition is said to be a very generic technique that needs no special syntax, while inheritance and its rules are strongly dependent on the language of choice. Actually, the strong dynamic nature of Python softens the boundary line between the two techniques.
## Inheritance Now
In Python a class can be declared as an _extension_ of one or more different classes, through the _class inheritance_ mechanism. The child class (the one that inherits) has the same internal structure as the parent class (the one that is inherited), and in the case of multiple inheritance the language has very specific rules to manage possible conflicts or redefinitions among the parent classes. A very simple example of inheritance is
```
class Door:
colour = 'brown'
def __init__(self, number, status):
self.number = number
self.status = status
@classmethod
def knock(cls):
print("Knock!")
@classmethod
def paint(cls, colour):
cls.colour = colour
def open(self):
self.status = 'open'
def close(self):
self.status = 'closed'
class SecurityDoor(Door):
pass
```
where we declare a new class `SecurityDoor` that, at the moment, is a perfect copy of the `Door` class. Let us investigate what happens when we access attributes and methods. First we instantiate the class
```
sdoor = SecurityDoor(1, 'closed')
```
The first check we can do is that class attributes are still global and shared
```
print(SecurityDoor.colour is Door.colour)
print(sdoor.colour is Door.colour)
```
This shows us that Python tries to resolve instance members not only by looking into the class the instance comes from, but also by investigating the parent classes. In this case `sdoor.colour` becomes `SecurityDoor.colour`, which in turn becomes `Door.colour`. `SecurityDoor` _is_ a `Door`.
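A quick sanity check (a minimal sketch) of this relationship:
```
print(isinstance(sdoor, Door))         # True: a SecurityDoor instance is also a Door
print(issubclass(SecurityDoor, Door))  # True
```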
If we investigate the content of `__dict__` we can catch a glimpse of the inheritance mechanism in action
```
print(sdoor.__dict__)
print(type(sdoor.__class__.__dict__))
print(sdoor.__class__.__dict__)
print(type(Door.__dict__))
print(Door.__dict__)
```
As you can see the content of `__dict__` for `SecurityDoor` is very narrow compared to that of `Door`. The inheritance mechanism takes care of the missing elements by climbing up the classes tree. Where does Python get the parent classes? A class always contains a `__bases__` tuple that lists them
```
print(SecurityDoor.__bases__)
```
So an example of what Python does to resolve a class method call through the inheritance tree is
```
print(sdoor.__class__.__bases__[0].__dict__['knock'].__get__(sdoor))
print(sdoor.knock)
```
Please note that this is just an example that does not consider multiple inheritance.
Let us try now to override some methods and attributes. In Python you can _override_ (redefine) a parent class member simply by redefining it in the child class.
```
class SecurityDoor(Door):
colour = 'gray'
locked = True
def open(self):
if not self.locked:
self.status = 'open'
```
As you would expect, the overridden members are now present in the `__dict__` of the `SecurityDoor` class
```
print(type(SecurityDoor.__dict__))
print(SecurityDoor.__dict__)
```
So when you override a member, the one you put in the child class is used instead of the one in the parent class simply because the former is found before the latter while climbing the class hierarchy. This also shows you that Python does not implicitly call the parent implementation when you override a method. So, overriding is a way to block implicit delegation.
If we want to call the parent implementation we have to do it explicitly. In the former example we could write
```
class SecurityDoor(Door):
colour = 'gray'
locked = True
def open(self):
if self.locked:
return
Door.open(self)
```
You can easily test that this implementation is working correctly.
```
sdoor = SecurityDoor(1, 'closed')
print(sdoor.status)
sdoor.open()
print(sdoor.status)
sdoor.locked = False
sdoor.open()
print(sdoor.status)
```
This form of explicit parent delegation is heavily discouraged, however.
The first reason is because of the very high coupling that results from explicitly naming the parent class again when calling the method. _Coupling_, in the computer science lingo, means to link two parts of a system, so that changes in one of them directly affect the other one, and is usually avoided as much as possible. In this case if you decide to use a new parent class you have to manually propagate the change to every method that calls it. Moreover, since in Python the class hierarchy can be dynamically changed (i.e. at runtime), this form of explicit delegation could be not only annoying but also wrong.
The second reason is that in general you need to deal with multiple inheritance, where you do not know a priori which parent class implements the original form of the method you are overriding.
To solve these issues, Python supplies the `super()` built-in function, that climbs the class hierarchy and returns the correct class that shall be called. The syntax for calling `super()` is
```
class SecurityDoor(Door):
colour = 'gray'
locked = True
def open(self):
if self.locked:
return
super().open()
```
The output of `super()` is not exactly the `Door` class. It returns a `super` object whose representation is `<super: <class 'SecurityDoor'>, <SecurityDoor object>>`. This object, however, acts like the parent class, so you can safely ignore its custom nature and use it just like you would do with the `Door` class in this case.
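A minimal sketch to see the proxy object in action (using the latest `SecurityDoor` definition above):
```
sdoor = SecurityDoor(1, 'closed')
print(super(SecurityDoor, sdoor))  # <super: <class 'SecurityDoor'>, <SecurityDoor object>>
sdoor.locked = False
sdoor.open()                       # delegates to Door.open() through super()
print(sdoor.status)                # 'open'
```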
## Enter the Composition
Composition means that an object knows another object, and explicitly delegates some tasks to it. While inheritance is implicit, composition is explicit: in Python, however, things are far more interesting than this =).
First of all let us implement classic composition, which simply makes an object part of the other as an attribute
```
class SecurityDoor:
colour = 'gray'
locked = True
def __init__(self, number, status):
self.door = Door(number, status)
def open(self):
if self.locked:
return
self.door.open()
def close(self):
self.door.close()
```
The primary goal of composition is to relax the coupling between objects. This little example shows that now `SecurityDoor` is an `object` and no longer a `Door`, which means that the internal structure of `Door` is not copied. For this very simple example both `Door` and `SecurityDoor` are small classes, but in a real system objects can be very complex; this means that their allocation consumes a lot of memory, and if a system contains thousands or millions of objects that could be an issue.
The composed `SecurityDoor` has to redefine the `colour` attribute since the concept of delegation applies only to methods and not to attributes, doesn't it?
Well, no. Python provides a very high degree of indirection for object manipulation, and attribute access is one of the most useful. As you already discovered, accessing attributes is ruled by a special method called `__getattribute__()` that is called whenever an attribute of the object is accessed. Overriding `__getattribute__()`, however, is overkill; it is a very complex method, and, being called on every attribute access, any change makes the whole thing slower.
The method we have to leverage to delegate attribute access is `__getattr__()`, which is a special method that is called whenever the requested attribute is not found in the object. So basically it is the right place to dispatch all attribute and method access our object cannot handle. The previous example becomes
```
class SecurityDoor:
locked = True
def __init__(self, number, status):
self.door = Door(number, status)
def open(self):
if self.locked:
return
self.door.open()
def __getattr__(self, attr):
return getattr(self.door, attr)
```
Using `__getattr__()` blurs the line between inheritance and composition, since after all inheritance is a form of automatic delegation of every member access.
```
class ComposedDoor:
def __init__(self, number, status):
self.door = Door(number, status)
def __getattr__(self, attr):
return getattr(self.door, attr)
```
As this last example shows, delegating every member access through `__getattr__()` is very simple. Pay attention to `getattr()` which is different from `__getattr__()`. The former is a built-in that is equivalent to the dotted syntax, i.e. `getattr(obj, 'someattr')` is the same as `obj.someattr`, but you have to use it since the name of the attribute is contained in a string.
Composition provides a superior way to manage delegation since it can selectively delegate the access, and even mask some attributes or methods, while inheritance cannot. In Python you also avoid the memory problems that might arise when you put many objects inside another; Python handles everything through references, i.e. through pointers to the memory location of the object, so the size of an attribute is constant and very limited.
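For example, a composed class can mask part of the wrapped object (a small sketch; the class name is invented for illustration):
```
class MaskedDoor:
    def __init__(self, number, status):
        self.door = Door(number, status)

    def __getattr__(self, attr):
        if attr == 'paint':
            raise AttributeError("painting is not allowed on a MaskedDoor")
        return getattr(self.door, attr)

mdoor = MaskedDoor(1, 'closed')
print(mdoor.colour)  # delegated to the wrapped Door
try:
    mdoor.paint('white')
except AttributeError as exc:
    print(exc)       # painting is not allowed on a MaskedDoor
```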
## Movie Trivia
Section titles come from the following movies: _The Cannonball Run (1981)_, _Apocalypse Now (1979)_, _Enter the Dragon (1973)_.
## Sources
You will find a lot of documentation in [this Reddit post](http://www.reddit.com/r/Python/comments/226ahl/some_links_about_python_oop/). Most of the information contained in this series comes from those sources.
## Feedback
Feel free to use [the blog Google+ page](https://plus.google.com/u/0/b/110554719587236016835/110554719587236016835/posts) to comment the post. The [GitHub issues](https://github.com/lgiordani/lgiordani.github.com/issues) page is the best place to submit corrections.
# Training an Encrypted Neural Network
In this tutorial, we will walk through an example of how we can train a neural network with CrypTen. This is particularly relevant for the <i>Feature Aggregation</i>, <i>Data Labeling</i> and <i>Data Augmentation</i> use cases. We will focus on the usual two-party setting and show how we can train an accurate neural network for digit classification on the MNIST data.
For concreteness, this tutorial will step through the <i>Feature Aggregation</i> use case: Alice and Bob each have part of the features of the data set, and wish to train a neural network on their combined data, while keeping their data private.
## Setup
As usual, we'll begin by importing and initializing the `crypten` and `torch` libraries.
We will use the MNIST dataset to demonstrate how Alice and Bob can learn without revealing protected information. For reference, the feature size of each example in the MNIST data is `28 x 28`. Let's assume Alice has the first `28 x 20` features and Bob has the last `28 x 8` features. One way to think of this split is that Alice has (roughly) the top 2/3rds of each image, while Bob has the bottom 1/3rd of each image. We'll again use our helper script `mnist_utils.py` that downloads the publicly available MNIST data, and splits the data as required.
For simplicity, we will restrict our problem to binary classification: we'll simply learn how to distinguish between 0 and non-zero digits. For speed of execution in the notebook, we will only create a dataset of 100 examples.
```
import crypten
import torch
crypten.init()
torch.set_num_threads(1)
%run ./mnist_utils.py --option features --reduced 100 --binary
```
Next, we'll define the network architecture below, and then describe how to train it on encrypted data in the next section.
```
import torch.nn as nn
import torch.nn.functional as F
#Define an example network
class ExampleNet(nn.Module):
def __init__(self):
super(ExampleNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.fc1 = nn.Linear(16 * 12 * 12, 100)
self.fc2 = nn.Linear(100, 2) # For binary classification, final layer needs only 2 outputs
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2)
out = out.view(-1, 16 * 12 * 12)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
return out
```
## Encrypted Training
After all the material we've covered in earlier tutorials, we only need to know a few additional items for encrypted training. We'll first discuss how the training loop in CrypTen differs from PyTorch. Then, we'll go through a complete example to illustrate training on encrypted data from end-to-end.
### How does CrypTen training differ from PyTorch training?
There are two main ways implementing a CrypTen training loop differs from a PyTorch training loop. We'll describe these items first, and then illustrate them with small examples below.
<i>(1) Use one-hot encoding</i>: CrypTen training requires all labels to use one-hot encoding. This means that when using standard datasets such as MNIST, we need to modify the labels to use one-hot encoding.
<i>(2) Directly update parameters</i>: CrypTen does not use the PyTorch optimizers. Instead, CrypTen implements encrypted SGD by implementing its own `backward` function, followed by directly updating the parameters. As we will see below, using SGD in CrypTen is very similar to using the PyTorch optimizers.
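For comparison, a rough plaintext PyTorch counterpart of that update pattern is sketched below (this is illustrative only and not part of the CrypTen example that follows; it reuses the `ExampleNet` defined above with random toy data):
```
# Plaintext PyTorch sketch of the same training step, for comparison with CrypTen
plaintext_model = ExampleNet()
optimizer = torch.optim.SGD(plaintext_model.parameters(), lr=0.001)
criterion = torch.nn.MSELoss()

x = torch.rand(10, 1, 28, 28)   # toy inputs
y = torch.rand(10, 2)           # toy targets (one-hot-like)

output = plaintext_model(x)
loss_value = criterion(output, y)
optimizer.zero_grad()   # CrypTen: model.zero_grad()
loss_value.backward()   # CrypTen: loss_value.backward()
optimizer.step()        # CrypTen: model.update_parameters(learning_rate)
```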
We now show some small examples to illustrate these differences. As before, we will assume Alice has the rank 0 process and Bob has the rank 1 process.
```
# Define source argument values for Alice and Bob
ALICE = 0
BOB = 1
# Load Alice's data
data_alice_enc = crypten.load_from_party('/tmp/alice_train.pth', src=ALICE)
# We'll now set up the data for our small example below
# For illustration purposes, we will create toy data
# and encrypt all of it from source ALICE
x_small = torch.rand(100, 1, 28, 28)
y_small = torch.randint(1, (100,))
# Transform labels into one-hot encoding
label_eye = torch.eye(2)
y_one_hot = label_eye[y_small]
# Transform all data to CrypTensors
x_train = crypten.cryptensor(x_small, src=ALICE)
y_train = crypten.cryptensor(y_one_hot)
# Instantiate and encrypt a CrypTen model
model_plaintext = ExampleNet()
dummy_input = torch.empty(1, 1, 28, 28)
model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
model.encrypt()
# Example: Stochastic Gradient Descent in CrypTen
model.train() # Change to training mode
loss = crypten.nn.MSELoss() # Choose loss functions
# Set parameters: learning rate, num_epochs
learning_rate = 0.001
num_epochs = 2
# Train the model: SGD on encrypted data
for i in range(num_epochs):
# forward pass
output = model(x_train)
loss_value = loss(output, y_train)
# set gradients to zero
model.zero_grad()
# perform backward pass
loss_value.backward()
# update parameters
model.update_parameters(learning_rate)
# examine the loss after each epoch
print("Epoch: {0:d} Loss: {1:.4f}".format(i, loss_value.get_plain_text()))
```
### A Complete Example
We now put these pieces together for a complete example of training a network in a multi-party setting.
As in Tutorial 3, we'll assume Alice has the rank 0 process, and Bob has the rank 1 process; so we'll load and encrypt Alice's data with `src=0`, and load and encrypt Bob's data with `src=1`. We'll then initialize a plaintext model and convert it to an encrypted model, just as we did in Tutorial 4. We'll finally define our loss function, training parameters, and run SGD on the encrypted data. For the purposes of this tutorial we train on 100 samples; training should complete in ~3 minutes per epoch.
```
import crypten.mpc as mpc
import crypten.communicator as comm
# Convert labels to one-hot encoding
# Since labels are public in this use case, we will simply use them from loaded torch tensors
labels = torch.load('/tmp/train_labels.pth')
labels = labels.long()
labels_one_hot = label_eye[labels]
@mpc.run_multiprocess(world_size=2)
def run_encrypted_training():
# Load data:
x_alice_enc = crypten.load_from_party('/tmp/alice_train.pth', src=ALICE)
x_bob_enc = crypten.load_from_party('/tmp/bob_train.pth', src=BOB)
crypten.print(x_alice_enc.size())
crypten.print(x_bob_enc.size())
# Combine the feature sets: identical to Tutorial 3
x_combined_enc = crypten.cat([x_alice_enc, x_bob_enc], dim=2)
# Reshape to match the network architecture
x_combined_enc = x_combined_enc.unsqueeze(1)
# Initialize a plaintext model and convert to CrypTen model
pytorch_model = ExampleNet()
model = crypten.nn.from_pytorch(pytorch_model, dummy_input)
model.encrypt()
"""
# Set train mode
model.train()
# Define a loss function
loss = crypten.nn.MSELoss()
# Define training parameters
learning_rate = 0.001
num_epochs = 2
batch_size = 10
num_batches = x_combined_enc.size(0) // batch_size
rank = comm.get().get_rank()
for i in range(num_epochs):
crypten.print(f"Epoch {i} in progress:")
for batch in range(num_batches):
# define the start and end of the training mini-batch
start, end = batch * batch_size, (batch + 1) * batch_size
# construct CrypTensors out of training examples / labels
x_train = x_combined_enc[start:end]
y_batch = labels_one_hot[start:end]
y_train = crypten.cryptensor(y_batch, requires_grad=True)
# perform forward pass:
output = model(x_train)
loss_value = loss(output, y_train)
# set gradients to "zero"
model.zero_grad()
# perform backward pass:
loss_value.backward()
# update parameters
model.update_parameters(learning_rate)
# Print progress every batch:
batch_loss = loss_value.get_plain_text()
crypten.print(f"\tBatch {(batch + 1)} of {num_batches} Loss {batch_loss.item():.4f}")
"""
run_encrypted_training()
```
We see that the average batch loss decreases across the epochs, as we expect during training.
This completes our tutorial. Before exiting this tutorial, please clean up the files generated using the following code.
```
import os
filenames = ['/tmp/alice_train.pth',
'/tmp/bob_train.pth',
'/tmp/alice_test.pth',
'/tmp/bob_test.pth',
'/tmp/train_labels.pth',
'/tmp/test_labels.pth']
for fn in filenames:
if os.path.exists(fn): os.remove(fn)
```
|
github_jupyter
|
import crypten
import torch
crypten.init()
torch.set_num_threads(1)
%run ./mnist_utils.py --option features --reduced 100 --binary
import torch.nn as nn
import torch.nn.functional as F
#Define an example network
class ExampleNet(nn.Module):
def __init__(self):
super(ExampleNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.fc1 = nn.Linear(16 * 12 * 12, 100)
self.fc2 = nn.Linear(100, 2) # For binary classification, final layer needs only 2 outputs
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2)
out = out.view(-1, 16 * 12 * 12)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
return out
# Define source argument values for Alice and Bob
ALICE = 0
BOB = 1
# Load Alice's data
data_alice_enc = crypten.load_from_party('/tmp/alice_train.pth', src=ALICE)
# We'll now set up the data for our small example below
# For illustration purposes, we will create toy data
# and encrypt all of it from source ALICE
x_small = torch.rand(100, 1, 28, 28)
y_small = torch.randint(1, (100,))
# Transform labels into one-hot encoding
label_eye = torch.eye(2)
y_one_hot = label_eye[y_small]
# Transform all data to CrypTensors
x_train = crypten.cryptensor(x_small, src=ALICE)
y_train = crypten.cryptensor(y_one_hot)
# Instantiate and encrypt a CrypTen model
model_plaintext = ExampleNet()
dummy_input = torch.empty(1, 1, 28, 28)
model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
model.encrypt()
# Example: Stochastic Gradient Descent in CrypTen
model.train() # Change to training mode
loss = crypten.nn.MSELoss() # Choose loss functions
# Set parameters: learning rate, num_epochs
learning_rate = 0.001
num_epochs = 2
# Train the model: SGD on encrypted data
for i in range(num_epochs):
# forward pass
output = model(x_train)
loss_value = loss(output, y_train)
# set gradients to zero
model.zero_grad()
# perform backward pass
loss_value.backward()
# update parameters
model.update_parameters(learning_rate)
# examine the loss after each epoch
print("Epoch: {0:d} Loss: {1:.4f}".format(i, loss_value.get_plain_text()))
import crypten.mpc as mpc
import crypten.communicator as comm
# Convert labels to one-hot encoding
# Since labels are public in this use case, we will simply use them from loaded torch tensors
labels = torch.load('/tmp/train_labels.pth')
labels = labels.long()
labels_one_hot = label_eye[labels]
@mpc.run_multiprocess(world_size=2)
def run_encrypted_training():
# Load data:
x_alice_enc = crypten.load_from_party('/tmp/alice_train.pth', src=ALICE)
x_bob_enc = crypten.load_from_party('/tmp/bob_train.pth', src=BOB)
crypten.print(x_alice_enc.size())
crypten.print(x_bob_enc.size())
# Combine the feature sets: identical to Tutorial 3
x_combined_enc = crypten.cat([x_alice_enc, x_bob_enc], dim=2)
# Reshape to match the network architecture
x_combined_enc = x_combined_enc.unsqueeze(1)
# Initialize a plaintext model and convert to CrypTen model
pytorch_model = ExampleNet()
model = crypten.nn.from_pytorch(pytorch_model, dummy_input)
model.encrypt()
# Set train mode
model.train()
# Define a loss function
loss = crypten.nn.MSELoss()
# Define training parameters
learning_rate = 0.001
num_epochs = 2
batch_size = 10
num_batches = x_combined_enc.size(0) // batch_size
rank = comm.get().get_rank()
for i in range(num_epochs):
crypten.print(f"Epoch {i} in progress:")
for batch in range(num_batches):
# define the start and end of the training mini-batch
start, end = batch * batch_size, (batch + 1) * batch_size
# construct CrypTensors out of training examples / labels
x_train = x_combined_enc[start:end]
y_batch = labels_one_hot[start:end]
y_train = crypten.cryptensor(y_batch, requires_grad=True)
# perform forward pass:
output = model(x_train)
loss_value = loss(output, y_train)
# set gradients to "zero"
model.zero_grad()
# perform backward pass:
loss_value.backward()
# update parameters
model.update_parameters(learning_rate)
# Print progress every batch:
batch_loss = loss_value.get_plain_text()
crypten.print(f"\tBatch {(batch + 1)} of {num_batches} Loss {batch_loss.item():.4f}")
run_encrypted_training()
import os
filenames = ['/tmp/alice_train.pth',
'/tmp/bob_train.pth',
'/tmp/alice_test.pth',
'/tmp/bob_test.pth',
'/tmp/train_labels.pth',
'/tmp/test_labels.pth']
for fn in filenames:
if os.path.exists(fn): os.remove(fn)
| 0.873822 | 0.992047 |
# Evaluating a Given Layer of a Tensorflow Checkpoint
```
from aix360.algorithms.profwt import print_layer_labels
from aix360.algorithms.profwt import fully_connected
import json
import numpy as np
import tensorflow as tf
import os
#Obtain parent directory for accessing various data files.
parent_dir = '../../aix360/models/profwt'
```
## Define a path for the tensorflow checkpoint of a pre-trained complex model
```
checkpoint_path = os.path.join(parent_dir, "checkpoints/train_resnetmodel_new1_799.ckpt")
```
## Load the Dataset on which Layer outputs need to be evaluated.
```
with open(parent_dir+'/data/data_files/cifar-10-train1-image.json') as file:
x_train1=json.load(file)
file.close()
x_train1=np.array(x_train1)
with open(parent_dir+'/data/data_files/cifar-10-train1-label.json') as file:
y_train1=json.load(file)
file.close()
y_train1=np.array(y_train1)
```
## Define a filename where you want layer output to be saved.
```
run=1
to_save_filename=parent_dir+'/data/probe_run'+str(run)+'.npy'
print(to_save_filename)
```
## Print Names of all Layers from the model in the checkpoint
```
attach_probe_checkpoint.print_layer_labels(checkpoint_path)
tf.reset_default_graph()
```
Identify the tensor names corresponding to a) the layer whose output is of interest,
b) the input layer where the model takes in the image/data sample, and c) the layer where the model takes in the labels to fit.
```
#Fixing a specific operation_name to define the layer output
operation_name='unit_1_1/sub_add/add:0'
# In this case the probe is intended to be after the second Resnet Block in 18 layer Resnet for CIFAR-10
input_features_name='Placeholder:0'
label_name='Placeholder_1:0'
#These two correspond to Placeholder tensors for Feature input and label
```
## Tapping the Layer Output, Evaluating and Storing it in a File
The attach_probe_eval() function loads a tensorflow checkpoint from a path and takes the following inputs: a) the layer name to tap into, b) the placeholder tensor name corresponding to the feature input x, c) the placeholder tensor name corresponding to the label y, d) the filename to save the layer outputs, and e) the data samples on which to evaluate the layer outputs.
```
pr=attach_probe_checkpoint.attach_probe_eval(input_features_name,label_name,operation_name,x_train1,y_train1,checkpoint_path)
np.save(to_save_filename,pr)
```
# Training a Logistic Probe Classifier based on Layer Outputs
## Load the Layer Output File for which Probe Classifier needs to be trained.
```
# In this script, samples for probe training and probe confidence evaluations are done
# on the layer outputs obtained from the same dataset. Load the layer output values from the file.
#In general, it can be made different by supplying a new y_train2 and probe_eval_input
y_train2=y_train1
probe_train_input=np.load(parent_dir+'/data/probe_run1.npy')
probe_eval_input=probe_train_input
```
## Supply Filenames to save Probe Classifier Model, Model Confidences
```
run=1
num_classes=10
to_save_pred_filename=parent_dir+"/data/probe_pred_run"+str(run)+'.npy'
to_save_probe_model_filename=parent_dir+"/data/probe_model_run"+str(run)
```
## Train a Probe Classifier, Evaluate it on Layer Outputs from a Dataset, and Store the Probe Confidences in a File
```
(log,pred)=train_probes.probe_train_eval(probe_train_input,y_train1,num_classes,probe_eval_input,y_train2,to_save_probe_model_filename)
np.save(to_save_pred_filename,pred)
```
# Simple Model Training - Unweighted on the Dataset
```
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
import os
```
Import the ProfWeight Explainer Class.
```
from profwexplainer import ProfweightExplainer
from resnet_keras_model import resnet_v1,lr_schedule,HParams
```
Open the file containing the training dataset for training the simple model. This file may differ from the dataset used for training the complex model (in this example it does).
```
with open(parent_dir+'/data/data_files/cifar-10-train2-image.json') as file:
x_train2=json.load(file)
file.close()
x_train2=np.array(x_train2)
with open(parent_dir+'/data/data_files/cifar-10-train2-label.json') as file:
y_train2=json.load(file)
file.close()
y_train2=np.array(y_train2)
# print ("x_train2 shape",x_train2.shape)
# print ("y_train2 shape",y_train2.shape)
with open(parent_dir+'/data/data_files/cifar-10-test-image.json') as file:
x_test=json.load(file)
file.close()
x_test=np.array(x_test)
with open(parent_dir+'/data/data_files/cifar-10-test-label.json') as file:
y_test=json.load(file)
file.close()
y_test=np.array(y_test)
print('x_train shape:', x_train2.shape)
print('y_train shape:', y_train2.shape)
```
Specify a checkpoint to save the model after training the simple model on the x_train2, y_train2 dataset.
```
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'resnet_target_model_unweighted.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
```
Specify the learning rate schedule and all the hyperparameters for training. In this example, these are the recommended settings from a popular Keras implementation of resnet models for CIFAR-10.
```
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),cooldown=0,patience=5,min_lr=0.5e-6)
hps = HParams(lr_scheduler=lr_scheduler,lr_reducer=lr_reducer,batch_size=128,epochs=200,checkpoint_path=filepath,num_classes=10,complexity_param=1,optimizer=Adam(lr=lr_schedule(0)))
```
The ProfweightExplainer class has a fit function that trains a simple model using a provided Keras model, which is built by calling the resnet_v1 function specified in the model file resnet_keras_model.py.
```
a=ProfweightExplainer()
m=a.fit(x_train2,y_train2,x_test,y_test,resnet_v1,hps,'neural_keras')
print("Initial Simple Model Accuracy:",m[1])
```
# Simple Model Training with ProfWeight - Sample Weights Obtained from Probe Confidences of Various Layers
List of all filenames, each of which contains the probe confidences of a specific layer for the samples in the x_train2, y_train2 dataset. These are assumed to have been obtained using the functions in attach_probe_checkpoint.py and train_probes.py.
```
list_probe_filenames=[parent_dir+'/data/probe_output/probe_2_out_pred'+str(x)+'.npy' for x in range(10,17)]
```
Specify a new checkpoint for the simple model with ProfWeight and set identical hyperparameters for the learning rate schedule and training.
```
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'resnet_target_model_weighted.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
hps = HParams(lr_scheduler=lr_scheduler,lr_reducer=lr_reducer,batch_size=128,epochs=200,checkpoint_path=filepath,num_classes=10,complexity_param=1,optimizer=Adam(lr=lr_schedule(0)))
```
Call the ProfweightExplainer class's explain function. It is the same as the fit function but additionally takes the list of probe filenames and the start and end layers whose confidences are averaged to form the sample weights. The explain function also scores the new simple model obtained after weighted training on the test dataset.
```
a.explain(x_train2,y_train2,x_test,y_test,resnet_v1,hps,list_probe_filenames,2,6,'neural_keras')
```
|
github_jupyter
|
from aix360.algorithms.profwt import print_layer_labels
from aix360.algorithms.profwt import fully_connected
import json
import numpy as np
import tensorflow as tf
import os
#Obtain parent directory for accessing various data files.
parent_dir = '../../aix360/models/profwt'
checkpoint_path = os.path.join(parent_dir, "checkpoints/train_resnetmodel_new1_799.ckpt")
with open(parent_dir+'/data/data_files/cifar-10-train1-image.json') as file:
x_train1=json.load(file)
file.close()
x_train1=np.array(x_train1)
with open(parent_dir+'/data/data_files/cifar-10-train1-label.json') as file:
y_train1=json.load(file)
file.close()
y_train1=np.array(y_train1)
run=1
to_save_filename=parent_dir+'/data/probe_run'+str(run)+'.npy'
print(to_save_filename)
attach_probe_checkpoint.print_layer_labels(checkpoint_path)
tf.reset_default_graph()
#Fixing a specific operation_name to define the layer output
operation_name='unit_1_1/sub_add/add:0'
# In this case the probe is intended to be after the second Resnet Block in 18 layer Resnet for CIFAR-10
input_features_name='Placeholder:0'
label_name='Placeholder_1:0'
#These two correspond to Placeholder tensors for Feature input and label
pr=attach_probe_checkpoint.attach_probe_eval(input_features_name,label_name,operation_name,x_train1,y_train1,checkpoint_path)
np.save(to_save_filename,pr)
# In this script, samples for probe training and probe confidence evaluations are done
# on the layer outputs obtained from the same dataset. Load the layer output values from the file.
#In general, it can be made different by supplying a new y_train2 and probe_eval_input
y_train2=y_train1
probe_train_input=np.load(parent_dir+'/data/probe_run1.npy')
probe_eval_input=probe_train_input
run=1
num_classes=10
to_save_pred_filename=parent_dir+"/data/probe_pred_run"+str(run)+'.npy'
to_save_probe_model_filename=parent_dir+"/data/probe_model_run"+str(run)
(log,pred)=train_probes.probe_train_eval(probe_train_input,y_train1,num_classes,probe_eval_input,y_train2,to_save_probe_model_filename)
np.save(to_save_pred_filename,pred)
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
import os
from profwexplainer import ProfweightExplainer
from resnet_keras_model import resnet_v1,lr_schedule,HParams
with open(parent_dir+'/data/data_files/cifar-10-train2-image.json') as file:
x_train2=json.load(file)
file.close()
x_train2=np.array(x_train2)
with open(parent_dir+'/data/data_files/cifar-10-train2-label.json') as file:
y_train2=json.load(file)
file.close()
y_train2=np.array(y_train2)
# print ("x_train2 shape",x_train2.shape)
# print ("y_train2 shape",y_train2.shape)
with open(parent_dir+'/data/data_files/cifar-10-test-image.json') as file:
x_test=json.load(file)
file.close()
x_test=np.array(x_test)
with open(parent_dir+'/data/data_files/cifar-10-test-label.json') as file:
y_test=json.load(file)
file.close()
y_test=np.array(y_test)
print('x_train shape:', x_train2.shape)
print('y_train shape:', y_train2.shape)
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'resnet_target_model_unweighted.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),cooldown=0,patience=5,min_lr=0.5e-6)
hps = HParams(lr_scheduler=lr_scheduler,lr_reducer=lr_reducer,batch_size=128,epochs=200,checkpoint_path=filepath,num_classes=10,complexity_param=1,optimizer=Adam(lr=lr_schedule(0)))
a=ProfweightExplainer()
m=a.fit(x_train2,y_train2,x_test,y_test,resnet_v1,hps,'neural_keras')
print("Initial Simple Model Accuracy:",m[1])
list_probe_filenames=[parent_dir+'/data/probe_output/probe_2_out_pred'+str(x)+'.npy' for x in range(10,17)]
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'resnet_target_model_weighted.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
hps = HParams(lr_scheduler=lr_scheduler,lr_reducer=lr_reducer,batch_size=128,epochs=200,checkpoint_path=filepath,num_classes=10,complexity_param=1,optimizer=Adam(lr=lr_schedule(0)))
a.explain(x_train2,y_train2,x_test,y_test,resnet_v1,hps,list_probe_filenames,2,6,'neural_keras')
| 0.46223 | 0.886862 |
# Code stuff - not slides!
```
%run ../ML_plots.ipynb
```
# Session 13:
## Supervised learning, part 2
*Andreas Bjerre-Nielsen*
## Agenda
1. [model building](#Model-building)
1. [model selection](#Model-selection)
- [cross validation](#Cross-validation)
- [tools for selection](#Tools-for-model-selection)
1. [dimensionality reduction](#Dimensionality-reduction)
1. [measures for classification](#Measures-for-classification)
## Vaaaamos
```
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
plt.style.use('default') # set style (colors, background, size, gridlines etc.)
plt.rcParams['figure.figsize'] = 10, 4 # set default size of plots
plt.rcParams.update({'font.size': 18})
```
## Supervised problems (1)
*What is the tradeoff for making supervised regression models?*
```
# f_bias_var['regression'][2]
```
## Supervised problems (2)
*What was a remedy to overfitting in linear models? How do we measure overfitting?*
Regularization
- Too many irrelevant features - solved by L1 regularization ~ lasso
- Exploding coefficients - solved by L2 regularization ~ ridge
# Model building
## Model pipelines (1)
*Is there a smart way to build ML models?*
Yes, we build a pipeline:
- Preprocessing data
- Standard: adding polynomials, imputation, rescaling
- Unsupervised learning (more info..)
- Supervised learning
## Model pipelines (2)
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_01.png' alt="Drawing" style="width: 900px;"/></center>
## Model pipelines (3)
*What are the advantages of using a pipeline?*
- Ensures good practice - we only fit on training data.
- Much less code!
## Applying a model pipeline (1)
*What would this look like in Python?*
```
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
print(pipe_preproc.steps[0])
print(pipe_preproc.steps[1])
```
## Applying a model pipeline (2)
*Let's load some Boston house price data*
```
from sklearn.datasets import load_boston
print('\n'.join(load_boston()['DESCR'].split('\n')[13:28]))
```
## Applying a model pipeline (3)
*And how do I apply the pipe on the data?*
```
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
X = load_boston().data
y = load_boston().target
print(load_boston().feature_names)
# splitting into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y)
# apply preproc - fit on train
pipe_preproc.fit(X_train) # fit to training
X_train_prep = pipe_preproc.transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
```
## Applying a model pipeline (4)
*And how do I apply the pipe on the data?*
```
# THE PIPE APPLIED
# apply preproc - fit on train
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
pipe_preproc.fit(X_train) # fit to training
X_train_prep = pipe_preproc.transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
# WITHOUT PIPE
poly_trans = PolynomialFeatures()
scaler = StandardScaler()
X_train_poly = poly_trans.fit_transform(X_train)
X_test_poly = poly_trans.fit_transform(X_test)
scaler.fit(X_train_poly)
X_train_prep_alt = scaler.transform(X_train_poly)
X_test_prep_alt = scaler.transform(X_test_poly)
```
# Model selection
## Measuring the problem
*Does machine learning work out of the box?*
- In some cases ML works quite well out of the box.
- Often ML requires making careful choices.
- Note that automated machine learning packages and services exist.
*Which choices are to be made?*
- We need to pick model building **hyperparameters**.
- E.g. $\lambda$ for Lasso, Ridge.
## Model validation (1)
*How do we measure our model's performance for different hyperparameters?*
- Remember we cannot use the test set.
*Could we somehow mimic what we do with test data?*
- Yes, we can split the remaining non-test data into training and validation data:
- we train the model for various hyperparameters on the training data;
- we pick the hyperparameters which perform best on the validation data.
## Model validation (2)
*The non-test data is split into training and validation*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_02.png' alt="Drawing" style="width: 950px;"/></center>
## Model validation (3)
*What would this look like in Python?*
```
# splitting into development (2/3) and test data (1/3)
X_dev, X_test, y_dev, y_test = train_test_split(X, y, test_size=1/3, random_state=1)
# splitting development into train (1/3) and validation (1/3)
X_train, X_val, y_train, y_val = train_test_split(X_dev, y_dev, test_size=1/2, random_state=1)
```
## Model validation (4)
Let's train a linear regression model
```
from sklearn.linear_model import Lasso, LinearRegression
pipe_lr = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
LinearRegression())
pipe_lr.fit(X_dev, y_dev)
```
## Model validation (5)
Let's find the Lasso model which performs best in the validation set
```
from sklearn.metrics import mean_squared_error as mse
perform = []
lambdas = np.logspace(-4, 4, 33)
for lambda_ in lambdas:
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=lambda_, random_state=1))
pipe_lasso.fit(X_train, y_train)
y_pred = pipe_lasso.predict(X_val)
perform.append(mse(y_pred, y_val))
hyperparam_perform = pd.Series(perform,index=lambdas)
optimal = hyperparam_perform.nsmallest(1)
print(optimal)
```
## Model validation (6)
Let's compare the performance of the Lasso vs. Linear Regression
```
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimal.index[0]))
pipe_lasso.fit(X_dev,y_dev)
print('Lasso', round(mse(pipe_lasso.predict(X_test),y_test), 3))
print('LinReg', round(mse(pipe_lr.predict(X_test),y_test), 3))
```
## Bias and variance (1)
*How do we describe the modelling error?*
From [Wikipedia Sunday, August 19, 2018](https://en.wikipedia.org/wiki/Bias%E2%80%93variance_tradeoff):
- model **bias**: _an error from erroneous assumptions in the learning algorithm_
- oversimplification of models, cannot approximate all patterns found
- model **variance**: _an error from sensitivity to small fluctuations in the training set_
- reacts too much to sample errors and thus finds too many spurious relations
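These two error sources can be summarized by the standard bias-variance decomposition of the expected squared prediction error (notation here: $\hat{f}$ is the fitted model, $f$ the true function, and $\sigma^2$ the irreducible noise variance):
$$E\big[(y-\hat{f}(x))^2\big]=\underbrace{\big(E[\hat{f}(x)]-f(x)\big)^2}_{\text{bias}^2}+\underbrace{E\Big[\big(\hat{f}(x)-E[\hat{f}(x)]\big)^2\Big]}_{\text{variance}}+\sigma^2$$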
## Bias and variance (2)
- **overfitting**: low bias / high variance
- in training, our model captures all patterns, but it also picks up some irrelevant (spurious) ones
- examples: Decision Trees, Support Vector Machines or Neural Networks
- **underfitting**: high bias / low variance
- in training, our model fails to capture relevant patterns in the data
- examples: linear and logistic regression (without polynomial expansion)
## Bias and variance (3)
*Not so fast.. OLS is unbiased, right?*
Yes, OLS is unbiased. But ..
- Requires we know the true form of the model.
*What happens if we introduce regularization?*
- Then model is no longer unbiased.
## Smarter validation
*Is this approach the smartest way for deciding on choice of hyperparameters?*
# NO
Our model choice depends a lot on which sample we pick. Could we use more of the data?
# Cross validation
## The holdout method
*How do we get more out of the data?*
We reuse the data in the development set repeatedly
- We test on all the data
- Rotate which parts of data is used for test and train.
## Leave-one-out CV
*How do we get the most out of the data?*
The most robust approach
- For each single observation in the training data, we use the remaining data to train.
- Makes the number of models equal to the number of observations.
- Very computing intensive - does not scale! (A minimal sketch follows below.)
LOOCV
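As a rough sketch (assuming the `pipe_lasso`, `X_dev` and `y_dev` objects from the earlier cells are still in memory), leave-one-out CV can be run with scikit-learn's `LeaveOneOut` splitter:
```
from sklearn.model_selection import LeaveOneOut, cross_val_score

# one fold per observation: this refits the pipeline len(X_dev) times
loo = LeaveOneOut()
loo_scores = cross_val_score(pipe_lasso, X_dev, y_dev,
                             scoring='neg_mean_squared_error',
                             cv=loo)
print('LOOCV MSE:', -loo_scores.mean())
```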
## K fold method (1)
*How do we balance computing time vs. overfitting?*
We split the sample into $K$ even-sized test bins.
- For each test bin $k$ we use the remaining data for training.
Advantages:
- We use all our data for testing.
- Training is done with 100-(100/K) pct. of the data, i.e. 90 pct. for K=10.
## K fold method (2)
In K-fold cross validation we average the errors.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_03.png' alt="Drawing" style="width: 1100px;"/></center>
## K fold method (3)
*How would we use K-fold cross validation to select our model?*
```
from sklearn.model_selection import KFold
kfolds = KFold(n_splits=10)
mseCV = []
for lambda_ in lambdas:
pipe_lassoCV = make_pipeline(PolynomialFeatures(degree=3, include_bias=False),
StandardScaler(),
Lasso(alpha=lambda_, random_state=1))
mseCV_ = []
for train_idx, val_idx in kfolds.split(X_dev, y_dev):
X_train, y_train, = X_dev[train_idx], y_dev[train_idx]
X_val, y_val = X_dev[val_idx], y_dev[val_idx]
pipe_lassoCV.fit(X_train, y_train)
mseCV_.append(mse(pipe_lassoCV.predict(X_val), y_val))
mseCV.append(mseCV_)
```
## K fold method (4)
```
optimalCV = pd.DataFrame(mseCV, index=lambdas).mean(axis=1).nsmallest(1)
pipe_lassoCV = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimalCV.index[0], random_state=1))
pipe_lassoCV.fit(X_dev,y_dev)
model_pipes = ('Lasso', pipe_lasso), ('Lasso CV', pipe_lassoCV),('LinReg', pipe_lr)
for model_name, model_pipe in model_pipes:
score = mse(model_pipe.predict(X_test),y_test)
print(model_name, round(score, 1))
```
## Learning curves (1)
*What does a balanced model look like?*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_04.png' alt="Drawing" style="width: 600px;"/></center>
## Learning curves (2)
```
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = \
learning_curve(estimator=pipe_lasso,
X=X_train,
y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10),
scoring='neg_mean_squared_error',
cv=3)
mse_ = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Test':-test_scores.mean(axis=1)})\
.set_index(pd.Index(train_sizes,name='sample size'))
print(mse_.head())
```
## Learning curves (3)
```
f_learn, ax = plt.subplots(figsize=(10,4))
mse_.plot(ax=ax, logy=True)
ax.fill_between(train_sizes,
-train_scores.mean(1) + train_scores.std(1)*1.96,
-train_scores.mean(1) - train_scores.std(1)*1.96,
alpha=0.25,
color='orange')
ax.set_ylabel('Mean squared error')
```
# Tools for model selection
## Validation curves (1)
```
from sklearn.model_selection import validation_curve
train_scores, test_scores = \
validation_curve(estimator=pipe_lasso,
X=X_train,
y=y_train,
param_name='lasso__alpha',
param_range=lambdas,
scoring='neg_mean_squared_error',
cv=3)
mse_score = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Validation':-test_scores.mean(axis=1),
'lambda':lambdas})\
.set_index('lambda')
print(mse_score.Validation.nsmallest(1))
```
## Validation curves (2)
```
mse_score.plot(logx=True, logy=True)
```
## Grid search (1)
*How do we search for two or more optimal parameters?*
- Goal: find the optimal parameter combination: $$\lambda_1^*,\lambda_2^*=\arg\min_{\lambda_1,\lambda_2}MSE^{CV}(X_{train},y_{train})$$
- Option 1: We can loop over the joint grid of parameters.
- One level for each parameter.
- Caveat: nested loops quickly become unwieldy as the number of hyperparameters grows.
- Option 2: sklearn's `GridSearchCV` is a tool which tests all parameter combinations.
## Grid search (2)
*How does this look in Python?*
```
from sklearn.model_selection import GridSearchCV
gs = GridSearchCV(estimator=pipe_lasso,
param_grid={'lasso__alpha':lambdas},
scoring='neg_mean_squared_error',
cv=10)
gs = gs.fit(X_train, y_train)
gs.best_params_
```
- Notation: double underscore between estimator and hyperparameter, e.g. 'est__hyperparam'
- Scoring: negative MSE as we're maximizing the score ~ minimize MSE.
## Grid search (3)
*What if we have 10,000 parameter combinations?*
- Option 1: you buy a cluster on Amazon, learn how to parallelize across computers. Intro in last lecture.
- Option 2: you drop some of the parameter values
- Option 3: `RandomizedSearchCV` searches a subset of the combinations (sketched below).
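A minimal sketch of option 3, reusing `pipe_lasso` and the `lambdas` grid from above (the settings here are illustrative, not tuned):
```
from sklearn.model_selection import RandomizedSearchCV

# samples n_iter candidates from the supplied list instead of trying all of them
rs = RandomizedSearchCV(estimator=pipe_lasso,
                        param_distributions={'lasso__alpha': list(lambdas)},
                        n_iter=10,
                        scoring='neg_mean_squared_error',
                        cv=10,
                        random_state=1)
rs = rs.fit(X_train, y_train)
print(rs.best_params_)
```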
## Enhancing cross validation (1)
- Model validation does not consider that we are also tuning hyperparameters:
- Leads to overfitting (Varma & Simon 2006; Cawley, Talbot 2010).
- Solution is **nested cross validation**.
- Validation step should not be modelled as 1) train; 2) test.
- Better way is 1) model selection: train, validate; 2) test.
- Implement as pp 204-205 in Python for Machine Learning:
- first inner loop: `GridSearchCV`
- second outer loop: `cross_val_score` (see the sketch below)
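A minimal sketch of the nested pattern (reusing `pipe_lasso`, `lambdas`, `X_dev` and `y_dev` from the earlier cells; the fold counts are illustrative):
```
from sklearn.model_selection import GridSearchCV, cross_val_score

# inner loop: hyperparameter selection
gs_inner = GridSearchCV(estimator=pipe_lasso,
                        param_grid={'lasso__alpha': lambdas},
                        scoring='neg_mean_squared_error',
                        cv=5)

# outer loop: performance estimate of the whole selection procedure
nested_scores = cross_val_score(gs_inner, X_dev, y_dev,
                                scoring='neg_mean_squared_error',
                                cv=5)
print('Nested CV MSE:', -nested_scores.mean())
```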
## Enhancing cross validation (2)
*Cross-val. suffers from the fact that it models test-train*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_07.png' alt="Drawing" style="width: 700px;"/></center>
# Dimensionality reduction
## Principal components analysis (1)
*How can we reduce the number of features?*
One solution is finding the **principal components**.
- essence: we get **fewer features** of **greater importance**.
- the new features are:
- *uncorrelated* (i.e. linearly independent, orthogonal)
- ordered by decreasing share of the variation in the feature data that they explain
The method is called **principal components analysis**
- corresponds to an eigendecomposition of the (covariance) matrix into
- principal eigenvectors (factors)
- principal eigenvalues (factor importance)
## Principal components analysis (2)
Finding principal components for two features. Notice:
- The factors are orthogonal
- The first factor explains more variation, |PC1| > |PC2|
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch05/images/05_01.png' alt="Drawing" style="width: 600px;"/></center>
## Principal components analysis (3)
We can plot the explained variation against the component indices, often called scree plot.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch05/images/05_02.png' alt="Drawing" style="width: 700px;"/></center>
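A minimal sketch of how such a scree plot can be produced for the Boston features (assuming the preprocessed `X_train_prep` from the pipeline cells above is still in memory):
```
from sklearn.decomposition import PCA

pca = PCA()
pca.fit(X_train_prep)  # standardized (polynomial) features from the preprocessing pipe
expl_var = pd.Series(pca.explained_variance_ratio_,
                     index=range(1, len(pca.explained_variance_ratio_) + 1),
                     name='explained variance ratio')
expl_var.plot(kind='bar')        # scree plot: variance explained per component
print(expl_var.cumsum().head())  # cumulative share explained by the first components
```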
## Principal components analysis (4)
*How do we choose the number of components?*
- Standard is to look for an *elbow* in the previous scree plot.
*What might go wrong with this approach?*
- The number of features should be a hyperparameter in the model building!!!
## Principal components analysis (5)
*How does this look in Python?*
```
from sklearn.decomposition import PCA
pipe_pca_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
PCA(),
Lasso())
gs = GridSearchCV(estimator=pipe_pca_lasso,
param_grid={'lasso__alpha':lambdas,
'pca__n_components':range(1, X_train.shape[1]+1)},
scoring='neg_mean_squared_error',
cv=10)
gs = gs.fit(X_train, y_train)
gs.best_params_
```
# Measures for classification
## Breakdown by error type (1)
We measure the accuracy as the rate of true predictions, i.e. $$ACC=\frac{TP+TN}{TP+TN+FP+FN}=\frac{True}{True+False}$$
where our measures are
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_08.png' alt="Drawing" style="width: 400px;"/></center>
## Breakdown by error type (2)
Some powerful measures:
- Precision: share of predicted positives that are actually positive
- PRE = $\frac{TP}{TP+FP}$
- Recall: share of actual positives that are predicted positive
- REC = $\frac{TP}{TP+FN}=\frac{TP}{AP}$
- F1: mix recall and precision: $\frac{2\cdot PRE\cdot REC}{PRE+ REC}$
```
from sklearn.metrics import precision_score, recall_score, f1_score
```
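For illustration only, a toy example with hypothetical binary label vectors (the Boston data above is a regression problem, so these arrays are made up):
```
y_true = np.array([1, 0, 1, 1, 0, 1, 0, 0])   # actual classes
y_hat = np.array([1, 0, 0, 1, 0, 1, 1, 0])    # predicted classes
print('PRE', precision_score(y_true, y_hat))  # TP / (TP + FP)
print('REC', recall_score(y_true, y_hat))     # TP / (TP + FN)
print('F1 ', f1_score(y_true, y_hat))         # harmonic mean of PRE and REC
```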
## Breakdown by error type (3)
Classification models provide a predicted likelihood of being in the class or not:
- Receiver Operating Characteristic (ROC) curve by varying thresholds for predicted true.
- ROC is a *theoretical* measure of model performance based on probabilities.
- AUC: Area Under the (ROC) Curve.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_10.png' alt="Drawing" style="width: 800px;"/></center>
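A minimal sketch of how the ROC curve and AUC are computed from predicted probabilities (the `y_true` and `y_score` arrays below are hypothetical; in practice they would come from a classifier's `predict_proba`):
```
from sklearn.metrics import roc_curve, roc_auc_score

y_true = np.array([0, 0, 1, 1, 0, 1, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7, 0.55, 0.45])  # predicted P(y=1)
fpr, tpr, thresholds = roc_curve(y_true, y_score)  # one (FPR, TPR) point per threshold
print('AUC:', roc_auc_score(y_true, y_score))
plt.plot(fpr, tpr, label='ROC')
plt.plot([0, 1], [0, 1], linestyle='--', label='chance')
plt.xlabel('False positive rate'); plt.ylabel('True positive rate'); plt.legend()
```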
# The end
[Return to agenda](#Agenda)
|
github_jupyter
|
%run ../ML_plots.ipynb
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
plt.style.use('default') # set style (colors, background, size, gridlines etc.)
plt.rcParams['figure.figsize'] = 10, 4 # set default size of plots
plt.rcParams.update({'font.size': 18})
# f_bias_var['regression'][2]
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
print(pipe_preproc.steps[0])
print(pipe_preproc.steps[1])
print('\n'.join(load_boston()['DESCR'].split('\n')[13:28]))
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
X = load_boston().data
y = load_boston().target
print(load_boston().feature_names)
# splitting into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y)
# apply preproc - fit on train
pipe_preproc.fit(X_train) # fit to training
X_train_prep = pipe_preproc.transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
# THE PIPE APPLIED
# apply preproc - fit on train
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
pipe_preproc.fit(X_train) # fit to training
X_train_prep = pipe_preproc.transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
# WITHOUT PIPE
poly_trans = PolynomialFeatures()
scaler = StandardScaler()
X_train_poly = poly_trans.fit_transform(X_train)
X_test_poly = poly_trans.fit_transform(X_test)
scaler.fit(X_train_poly)
X_train_prep_alt = scaler.transform(X_train_poly)
X_test_prep_alt = scaler.transform(X_test_poly)
# splitting into development (2/3) and test data (1/3)
X_dev, X_test, y_dev, y_test = train_test_split(X, y, test_size=1/3, random_state=1)
# splitting development into train (1/3) and validation (1/3)
X_train, X_val, y_train, y_val = train_test_split(X_dev, y_dev, test_size=1/2, random_state=1)
from sklearn.linear_model import Lasso, LinearRegression
pipe_lr = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
LinearRegression())
pipe_lr.fit(X_dev, y_dev)
from sklearn.metrics import mean_squared_error as mse
perform = []
lambdas = np.logspace(-4, 4, 33)
for lambda_ in lambdas:
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=lambda_, random_state=1))
pipe_lasso.fit(X_train, y_train)
y_pred = pipe_lasso.predict(X_val)
perform.append(mse(y_pred, y_val))
hyperparam_perform = pd.Series(perform,index=lambdas)
optimal = hyperparam_perform.nsmallest(1)
print(optimal)
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimal.index[0]))
pipe_lasso.fit(X_dev,y_dev)
print('Lasso', round(mse(pipe_lasso.predict(X_test),y_test), 3))
print('LinReg', round(mse(pipe_lr.predict(X_test),y_test), 3))
from sklearn.model_selection import KFold
kfolds = KFold(n_splits=10)
mseCV = []
for lambda_ in lambdas:
pipe_lassoCV = make_pipeline(PolynomialFeatures(degree=3, include_bias=False),
StandardScaler(),
Lasso(alpha=lambda_, random_state=1))
mseCV_ = []
for train_idx, val_idx in kfolds.split(X_dev, y_dev):
X_train, y_train, = X_dev[train_idx], y_dev[train_idx]
X_val, y_val = X_dev[val_idx], y_dev[val_idx]
pipe_lassoCV.fit(X_train, y_train)
mseCV_.append(mse(pipe_lassoCV.predict(X_val), y_val))
mseCV.append(mseCV_)
optimalCV = pd.DataFrame(mseCV, index=lambdas).mean(axis=1).nsmallest(1)
pipe_lassoCV = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimalCV.index[0], random_state=1))
pipe_lassoCV.fit(X_dev,y_dev)
model_pipes = ('Lasso', pipe_lasso), ('Lasso CV', pipe_lassoCV),('LinReg', pipe_lr)
for model_name, model_pipe in model_pipes:
score = mse(model_pipe.predict(X_test),y_test)
print(model_name, round(score, 1))
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = \
learning_curve(estimator=pipe_lasso,
X=X_train,
y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10),
scoring='neg_mean_squared_error',
cv=3)
mse_ = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Test':-test_scores.mean(axis=1)})\
.set_index(pd.Index(train_sizes,name='sample size'))
print(mse_.head())
f_learn, ax = plt.subplots(figsize=(10,4))
mse_.plot(ax=ax, logy=True)
ax.fill_between(train_sizes,
-train_scores.mean(1) + train_scores.std(1)*1.96,
-train_scores.mean(1) - train_scores.std(1)*1.96,
alpha=0.25,
color='orange')
ax.set_ylabel('Mean squared error')
from sklearn.model_selection import validation_curve
train_scores, test_scores = \
validation_curve(estimator=pipe_lasso,
X=X_train,
y=y_train,
param_name='lasso__alpha',
param_range=lambdas,
scoring='neg_mean_squared_error',
cv=3)
mse_score = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Validation':-test_scores.mean(axis=1),
'lambda':lambdas})\
.set_index('lambda')
print(mse_score.Validation.nsmallest(1))
mse_score.plot(logx=True, logy=True)
from sklearn.model_selection import GridSearchCV
gs = GridSearchCV(estimator=pipe_lasso,
param_grid={'lasso__alpha':lambdas},
scoring='neg_mean_squared_error',
cv=10)
gs = gs.fit(X_train, y_train)
gs.best_params_
from sklearn.decomposition import PCA
pipe_pca_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
PCA(),
Lasso())
gs = GridSearchCV(estimator=pipe_pca_lasso,
param_grid={'lasso__alpha':lambdas,
'pca__n_components':range(1, X_train.shape[1]+1)},
scoring='neg_mean_squared_error',
cv=10)
gs = gs.fit(X_train, y_train)
gs.best_params_
from sklearn.metrics import precision_score, recall_score, f1_score
| 0.630685 | 0.9357 |
# Code stuff - not slides!
```
%run ../ML_plots.ipynb
```
# Session 13:
## Supervised learning, part 2
*Andreas Bjerre-Nielsen*
## Agenda
1. [model building](#Model-building)
1. [model selection](#Model-selection)
- [cross validation](#Cross-validation)
- [tools for selection](#Tools-for-model-selection)
1. [dimensionality reduction](#Dimensionality-reduction)
1. [measures for classification](#Measures-for-classification)
## Vaaaamos
```
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
plt.style.use('default') # set style (colors, background, size, gridlines etc.)
plt.rcParams['figure.figsize'] = 10, 4 # set default size of plots
plt.rcParams.update({'font.size': 18})
```
## Supervised problems (1)
*What is the tradeoff for making supervised regression models?*
```
# f_bias_var['regression'][2]
```
## Supervised problems (2)
*What was a remedy to overfitting in linear models? How do we measure overfitting?*
Regularization
- Too many irrelevant features - solved by L1 regularization ~ lasso
- Exploding coefficients - solved by L2 regularization ~ ridge
# Model building
## Model pipelines (1)
*Is there a smart way to build ML models?*
Yes, we build a pipeline:
- Preprocessing data
- Standard: adding polynomials, imputation, rescaling
- Unsupervised learning (more info..)
- Supervised learning
## Model pipelines (2)
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_01.png' alt="Drawing" style="width: 900px;"/></center>
## Model pipelines (3)
*What are the advantages of using a pipeline?*
- Ensures good practice - we only fit on training data.
- Much less code!
## Applying a model pipeline (1)
*What would this look like in Python?*
```
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
print(pipe_preproc.steps[0])
print(pipe_preproc.steps[1])
```
## Applying a model pipeline (2)
*Let's load some Boston house price data*
```
from sklearn.datasets import load_boston
print('\n'.join(load_boston()['DESCR'].split('\n')[13:28]))
```
## Applying a model pipeline (3)
*And how do I apply the pipe on the data?*
```
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
X = load_boston().data
y = load_boston().target
print(load_boston().feature_names)
# splitting into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y)
# apply preproc - fit on train
pipe_preproc.fit(X_train) # fit to training
X_train_prep = pipe_preproc.transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
```
## Applying a model pipeline (4)
*And how do I apply the pipe on the data?*
```
# THE PIPE APPLIED
# apply preproc - fit on train
pipe_preproc = make_pipeline(PolynomialFeatures(),
StandardScaler())
pipe_preproc.fit(X_train) # fit to training
X_train_prep = pipe_preproc.transform(X_train) # transform training data
X_test_prep = pipe_preproc.transform(X_test) # transform test data
# WITHOUT PIPE
poly_trans = PolynomialFeatures()
scaler = StandardScaler()
X_train_poly = poly_trans.fit_transform(X_train)
X_test_poly = poly_trans.fit_transform(X_test)
scaler.fit(X_train_poly)
X_train_prep_alt = scaler.transform(X_train_poly)
X_test_prep_alt = scaler.transform(X_test_poly)
```
# Model selection
## Measuring the problem
*Does machine learning work out of the box?*
- In some cases ML works quite well out of the box.
- Often ML requires making careful choices.
- Note that automated machine learning packages and services exist.
*Which choices are to be made?*
- We need to pick model building **hyperparameters**.
- E.g. $\lambda$ for Lasso, Ridge.
## Model validation (1)
*How do we measure our model's performance for different hyperparameters?*
- Remember we cannot use the test set.
*Could we somehow mimic what we do with test data?*
- Yes, we can split the remaining non-test data into training and validation data:
- we train the model for various hyperparameters on the training data;
- we pick the hyperparameters which perform best on the validation data.
## Model validation (2)
*The non-test data is split into training and validation*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_02.png' alt="Drawing" style="width: 950px;"/></center>
## Model validation (3)
*What would this look like in Python?*
```
# splitting into development (2/3) and test data (1/3)
X_dev, X_test, y_dev, y_test = train_test_split(X, y, test_size=1/3, random_state=1)
# splitting development into train (1/3) and validation (1/3)
X_train, X_val, y_train, y_val = train_test_split(X_dev, y_dev, test_size=1/2, random_state=1)
```
## Model validation (4)
Let's train a linear regression model
```
from sklearn.linear_model import Lasso, LinearRegression
pipe_lr = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
LinearRegression())
print(pipe_lr.fit(X_dev, y_dev))
#pipe_lr.coef_
lin_reg = LinearRegression()
lin_reg.fit(X_dev,y_dev)
lin_reg.predict(X_dev)
lin_reg.coef_
test = pd.DataFrame(X_dev)
test.dtypes
test.head()
```
## Model validation (5)
Let's find the Lasso model which performs best in the validation set
```
from sklearn.metrics import mean_squared_error as mse
perform = []
lambdas = np.logspace(-4, 4, 33)
for lambda_ in lambdas:
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=lambda_, random_state=1))
pipe_lasso.fit(X_train, y_train)
y_pred = pipe_lasso.predict(X_val)
perform.append(mse(y_pred, y_val))
hyperparam_perform = pd.Series(perform,index=lambdas)
optimal = hyperparam_perform.nsmallest(1)
print(optimal)
```
## Model validation (6)
Let's compare the performance of the Lasso vs. Linear Regression
```
pipe_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimal.index[0]))
pipe_lasso.fit(X_dev,y_dev)
print('Lasso', round(mse(pipe_lasso.predict(X_test),y_test), 3))
print('LinReg', round(mse(pipe_lr.predict(X_test),y_test), 3))
```
## Bias and variance (1)
*How do we describe the modelling error?*
From [Wikipedia Sunday, August 19, 2018](https://en.wikipedia.org/wiki/Bias%E2%80%93variance_tradeoff):
- model **bias**: _an error from erroneous assumptions in the learning algorithm_
- oversimplification of models, cannot approximate all patterns found
- model **variance**: _an error from sensitivity to small fluctuations in the training set_
- reacts too much to sample errors and thus finds too many spurious relations
## Bias and variance (2)
- **overfitting**: low bias / high variance
- in training, our model captures all patterns, but it also picks up some irrelevant (spurious) ones
- examples: Decision Trees, Support Vector Machines or Neural Networks
- **underfitting**: high bias / low variance
- in training, our model fails to capture relevant patterns in the data
- examples: linear and logistic regression (without polynomial expansion)
## Bias and variance (3)
*Not so fast.. OLS is unbiased, right?*
Yes, OLS is unbiased. But ..
- Requires we know the true form of the model.
*What happens if we introduce regularization?*
- Then model is no longer unbiased.
## Smarter validation
*Is this approach the smartest way for deciding on choice of hyperparameters?*
# NO
Our model choice depends a lot on which sample we pick. Could we use more of the data?
# Cross validation
## The holdout method
*How do we get more out of the data?*
We reuse the data in the development set repeatedly
- We test on all the data
- Rotate which parts of data is used for test and train.
## Leave-one-out CV
*How do we get the most out of the data?*
The most robust approach
- For each single observation in the training data, we use the remaining data to train.
- Makes the number of models equal to the number of observations.
- Very computing intensive - does not scale!
LOOCV
## K fold method (1)
*How do we balance computing time vs. overfitting?*
We split the sample into $K$ even-sized test bins.
- For each test bin $k$ we use the remaining data for training.
Advantages:
- We use all our data for testing.
- Training is done with 100-(100/K) pct. of the data, i.e. 90 pct. for K=10.
## K fold method (2)
In K-fold cross validation we average the errors.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_03.png' alt="Drawing" style="width: 1100px;"/></center>
## K fold method (3)
*How would we use K-fold cross validation to select our model?*
```
from sklearn.model_selection import KFold
kfolds = KFold(n_splits=10)
mseCV = []
for lambda_ in lambdas:
pipe_lassoCV = make_pipeline(PolynomialFeatures(degree=3, include_bias=False),
StandardScaler(),
Lasso(alpha=lambda_, random_state=1))
mseCV_ = []
for train_idx, val_idx in kfolds.split(X_dev, y_dev):
X_train, y_train, = X_dev[train_idx], y_dev[train_idx]
X_val, y_val = X_dev[val_idx], y_dev[val_idx]
pipe_lassoCV.fit(X_train, y_train)
mseCV_.append(mse(pipe_lassoCV.predict(X_val), y_val))
mseCV.append(mseCV_)
```
## K fold method (4)
```
optimalCV = pd.DataFrame(mseCV, index=lambdas).mean(axis=1).nsmallest(1)
pipe_lassoCV = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
Lasso(alpha=optimalCV.index[0], random_state=1))
pipe_lassoCV.fit(X_dev,y_dev)
model_pipes = ('Lasso', pipe_lasso), ('Lasso CV', pipe_lassoCV),('LinReg', pipe_lr)
for model_name, model_pipe in model_pipes:
score = mse(model_pipe.predict(X_test),y_test)
print(model_name, round(score, 1))
```
## Learning curves (1)
*What does a balanced model look like?*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_04.png' alt="Drawing" style="width: 600px;"/></center>
## Learning curves (2)
```
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = \
learning_curve(estimator=pipe_lasso,
X=X_train,
y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10),
scoring='neg_mean_squared_error',
cv=3)
mse_ = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Test':-test_scores.mean(axis=1)})\
.set_index(pd.Index(train_sizes,name='sample size'))
print(mse_.head())
```
## Learning curves (3)
```
f_learn, ax = plt.subplots(figsize=(10,4))
mse_.plot(ax=ax, logy=True)
ax.fill_between(train_sizes,
-train_scores.mean(1) + train_scores.std(1)*1.96,
-train_scores.mean(1) - train_scores.std(1)*1.96,
alpha=0.25,
color='orange')
ax.set_ylabel('Mean squared error')
```
# Tools for model selection
## Validation curves (1)
```
from sklearn.model_selection import validation_curve
train_scores, test_scores = \
validation_curve(estimator=pipe_lasso,
X=X_train,
y=y_train,
param_name='lasso__alpha',
param_range=lambdas,
scoring='neg_mean_squared_error',
cv=3)
mse_score = pd.DataFrame({'Train':-train_scores.mean(axis=1),
'Validation':-test_scores.mean(axis=1),
'lambda':lambdas})\
.set_index('lambda')
print(mse_score.Validation.nsmallest(1))
```
## Validation curves (2)
```
mse_score.plot(logx=True, logy=True)
```
## Grid search (1)
*How do we search for two or more optimal parameters?*
- Goal: find the optimal parameter combination: $$\lambda_1^*,\lambda_2^*=\arg\min_{\lambda_1,\lambda_2}MSE^{CV}(X_{train},y_{train})$$
- Option 1: We can loop over the joint grid of parameters.
- One level for each parameter.
- Caveat: nested loops quickly become unwieldy as the number of hyperparameters grows.
- Option 2: sklearn's `GridSearchCV` is a tool which tests all parameter combinations.
## Grid search (2)
*How does this look in Python?*
```
from sklearn.model_selection import GridSearchCV
gs = GridSearchCV(estimator=pipe_lasso,
param_grid={'lasso__alpha':lambdas},
scoring='neg_mean_squared_error',
cv=10)
gs = gs.fit(X_train, y_train)
gs.best_params_
```
- Notation: double underscore between estimator and hyperparameter, e.g. 'est__hyperparam'
- Scoring: negative MSE as we're maximizing the score ~ minimize MSE.
## Grid search (3)
*What if we have 10,000 parameter combinations?*
- Option 1: you buy a cluster on Amazon, learn how to parallelize across computers. Intro in last lecture.
- Option 2: you drop some of the parameter values
- Option 3: `RandomizedSearchCV` searches a subset of the combinations.
## Enhancing cross validation (1)
- Model validation does not consider that we are also tuning hyperparameters:
- Leads to overfitting (Varma & Simon 2006; Cawley, Talbot 2010).
- Solution is **nested cross validation**.
- Validation step should not be modelled as 1) train; 2) test.
- Better way is 1) model selection: train, validate; 2) test.
- Implement as pp 204-205 in Python for Machine Learning:
- first inner loop: `GridSearchCV`
- second outer loop: `cross_val_score`
## Enhancing cross validation (2)
*Cross-val. suffers from the fact that it models test-train*
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_07.png' alt="Drawing" style="width: 700px;"/></center>
# Dimensionality reduction
## Principal components analysis (1)
*How can we reduce the number of features?*
One solution is finding the **principal components**.
- essence: we get **fewer features** of **greater importance**.
- the new features are:
- *uncorrelated* (i.e. linearly independent, orthogonal)
- ordered by decreasing share of the variation in the feature data that they explain
The method is called **principal components analysis**
- corresponds to an eigendecomposition of the (covariance) matrix into
- principal eigenvectors (factors)
- principal eigenvalues (factor importance)
## Principal components analysis (2)
Finding principal components for two features. Notice:
- The factors are orthogonal
- The first factor explains more variation, |PC1| > |PC2|
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch05/images/05_01.png' alt="Drawing" style="width: 600px;"/></center>
## Principal components analysis (3)
We can plot the explained variation against the component indices, often called scree plot.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch05/images/05_02.png' alt="Drawing" style="width: 700px;"/></center>
## Principal components analysis (4)
*How do we choose the number of components?*
- Standard is to look for an *elbow* in the previous scree plot.
*What might go wrong with this approach?*
- The number of features should be a hyperparameter in the model building!!!
## Principal components analysis (5)
*How does this look in Python?*
```
from sklearn.decomposition import PCA
pipe_pca_lasso = make_pipeline(PolynomialFeatures(include_bias=False),
StandardScaler(),
PCA(),
Lasso())
gs = GridSearchCV(estimator=pipe_pca_lasso,
param_grid={'lasso__alpha':lambdas,
'pca__n_components':range(1, X_train.shape[1]+1)},
scoring='neg_mean_squared_error',
cv=10)
gs = gs.fit(X_train, y_train)
gs.best_params_
```
# Measures for classification
## Breakdown by error type (1)
We measure the accuracy as the rate of true predictions, i.e. $$ACC=\frac{TP+TN}{TP+TN+FP+FN}=\frac{True}{True+False}$$
where our measures are
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_08.png' alt="Drawing" style="width: 400px;"/></center>
## Breakdown by error type (2)
Some powerful measures:
- Precision: share of predicted positives that are actually positive
- PRE = $\frac{TP}{TP+FP}$
- Recall: share of actual positives that are predicted positive
- REC = $\frac{TP}{TP+FN}=\frac{TP}{AP}$
- F1: mix recall and precision: $\frac{2\cdot PRE\cdot REC}{PRE+ REC}$
```
from sklearn.metrics import precision_score, recall_score, f1_score
```
## Breakdown by error type (3)
Classification models provide a predicted likelihood of being in the class or not:
- Receiver Operating Characteristic (ROC) curve by varying thresholds for predicted true.
- ROC is a *theoretical* measure of model performance based on probabilities.
- AUC: Area Under the (ROC) Curve.
<center><img src='https://github.com/rasbt/python-machine-learning-book-2nd-edition/raw/master/code/ch06/images/06_10.png' alt="Drawing" style="width: 800px;"/></center>
# The end
[Return to agenda](#Agenda)
|
github_jupyter
|
```
import pandas as pd
from datetime import datetime
import trdb2py
isStaticImg = False
width = 960
height = 768
pd.options.display.max_columns = None
pd.options.display.max_rows = None
trdb2cfg = trdb2py.loadConfig('./trdb2.yaml')
# The specific fund/index to test
# asset = 'jrj.510310'
# asset = 'jqdata.000036_XSHG|1d'
asset = 'jqdata.000037_XSHG|1d'
# Start time; 0 means start from the earliest available data
# tsStart = 0
tsStart = int(trdb2py.str2timestamp('2020-10-01', '%Y-%m-%d'))
# End time; -1 means up to the present
tsEnd = -1
# tsEnd = int(trdb2py.str2timestamp('2020-09-30', '%Y-%m-%d'))
# Initial capital pool
paramsinit = trdb2py.trading2_pb2.InitParams(
    money=10000,
)
# Buy parameters: spend all available money on each buy (i.e. compound returns)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=1,
)
# Sell parameters: sell the full position
paramssell = trdb2py.trading2_pb2.SellParams(
    perVolume=1,
)
# Convert an offset measured in trading days (offday) into calendar days,
# skipping weekends; wday is the weekday of the starting date (1=Mon ... 5=Fri).
def calcweekday2val2(wday, offday):
if offday == 1:
if wday == 5:
return 3
if offday == 2:
if wday >= 4:
return 4
if offday == 3:
if wday >= 3:
return 5
if offday == 4:
if wday >= 2:
return 6
return offday
# baseline
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
hs300cs = trdb2py.trading2_pb2.Candle(
ts=int(trdb2py.str2timestamp('2021-05-10', '%Y-%m-%d')),
open=49924200,
close=49924200,
high=49924200,
low=49924200,
)
hs300cs1 = trdb2py.trading2_pb2.Candle(
ts=int(trdb2py.str2timestamp('2021-05-11', '%Y-%m-%d')),
open=49924200,
close=49924200,
high=49924200,
low=49924200,
)
hs300cs2 = trdb2py.trading2_pb2.Candle(
ts=int(trdb2py.str2timestamp('2021-05-12', '%Y-%m-%d')),
open=49924200,
close=49924200,
high=49924200,
low=49924200,
)
hs300cs3 = trdb2py.trading2_pb2.Candle(
ts=int(trdb2py.str2timestamp('2021-05-13', '%Y-%m-%d')),
open=49924200,
close=49924200,
high=49924200,
low=49924200,
)
hs300cs4 = trdb2py.trading2_pb2.Candle(
ts=int(trdb2py.str2timestamp('2021-05-14', '%Y-%m-%d')),
open=49924200,
close=49924200,
high=49924200,
low=49924200,
)
hs300 = trdb2py.trading2_pb2.Candles(
market='jqdata',
symbol='000300_XSHG|1d',
# candles=[hs300cs, hs300cs1, hs300cs2, hs300cs3, hs300cs4],
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='baseline',
offset=29,
candles=[hs300],
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0, ignoreCache=True)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)
```
As before, we use the CSI 300 index daily data from May 1, 2013 to September 30, 2020 as the example.
```
ret = trdb2py.getAssetCandles2(trdb2cfg, asset, tsStart, tsEnd, indicators=['ta-ema.29'], offset=29, simCandle=hs300, indicatorScale=10000)
# print(ret)
# ret
trdb2py.showAssetCandles2('上证医药指数', ret, indicators=['ta-ema.29'], toImg=isStaticImg, width=width, height=height)
lstparams = []
s0 = trdb2py.trading2_pb2.Strategy(
name="normal",
asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
name='weekday2',
vals=[4, calcweekday2val2(4, 4)],
)
buy1 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['up'],
strVals=['ta-ema.{}'.format(29)],
)
buy2 = trdb2py.trading2_pb2.CtrlCondition(
name='weekday2',
vals=[1, calcweekday2val2(1, 4)],
group=1,
)
buy3 = trdb2py.trading2_pb2.CtrlCondition(
name='indicatorsp',
operators=['down'],
strVals=['ta-ema.29'],
group=1,
)
sell0 = trdb2py.trading2_pb2.CtrlCondition(
name='weekday',
vals=[3],
)
sell1 = trdb2py.trading2_pb2.CtrlCondition(
name='ctrlconditionid',
vals=[1],
strVals=['buy'],
)
sell2 = trdb2py.trading2_pb2.CtrlCondition(
name='weekday',
vals=[5],
group=1,
)
sell3 = trdb2py.trading2_pb2.CtrlCondition(
name='ctrlconditionid',
vals=[2],
strVals=['buy'],
group=1,
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
money=10000,
)
s0.buy.extend([buy0, buy1, buy2, buy3])
s0.sell.extend([sell0, sell1, sell2, sell3])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsSell.CopyFrom(paramssell)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
assets=[trdb2py.str2asset(asset)],
startTs=tsStart,
endTs=tsEnd,
strategies=[s0],
title='周内效应混合策略',
offset=29,
candles=[hs300],
)
pnlm = trdb2py.simTrading(trdb2cfg, p0)
# trdb2py.showPNLs(lstpnlmix + [pnlBaseline], toImg=isStaticImg, width=width, height=height)
sts = trdb2py.getFirstCtrlTs(pnlm)
trdb2py.showPNLs([pnlm, pnlBaseline], toImg=isStaticImg, isShowBuy=True, isShowSell=True, width=width, height=height, startTs=sts)
npnl0 = trdb2py.clonePNLWithTs(pnlBaseline['pnl'], sts)
npnl1 = trdb2py.clonePNLWithTs(pnlm['pnl'], sts)
lstallpnl = [{'title': '上证医药指数', 'pnl': npnl0},
{'title': '周内效应混合策略', 'pnl': npnl1}]
for v in lstallpnl:
trdb2py.rebuildPNL(v['pnl'])
dflstallpnl = trdb2py.buildPNLReport(lstallpnl)
dflstallpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility']].sort_values(by='totalReturns', ascending=False)
# dfpnl = trdb2py.buildPNLReport([pnlm, pnlBaseline])
# # dfpnl1 = dfpnl[dfpnl['totalReturns'] >= 2]
# dfpnl[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns', 'sharpe', 'annualizedReturns', 'annualizedVolatility', 'variance']].sort_values(by='totalReturns', ascending=False)
```
```
import dataset, model
import albumentations as A
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import random, os
def set_seed(seed: int = 42):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed) # type: ignore
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = True # type: ignore
set_seed(0) # DONT CHANGE THE SEED
import importlib
importlib.reload(model)
importlib.reload(dataset)
train = pd.read_csv('data/train.csv')
val = pd.read_csv('data/val.csv')
train.ImageID = [f'train/{i}' for i in train.ImageID]
val.ImageID = [f'val/{i}' for i in val.ImageID]
trainval = pd.concat((train,val))
trainval.to_csv('data/trainval.csv')
## RUN THIS CELL TWICE FOR TWO SEPARATE MODELS
import model
import albumentations as A
from albumentations.augmentations.transforms import Flip, Blur, ChannelShuffle
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
if __name__ == '__main__':
ckpt = [ModelCheckpoint()
]
trainer = Trainer(max_epochs = 100,gpus = 1, callbacks = ckpt, precision=16,deterministic=True,fast_dev_run = False)
train_tr = A.Compose([
A.augmentations.transforms.ColorJitter(),
A.augmentations.transforms.GaussNoise(),
#A.Resize(32,100)
A.Affine(0.7,translate_px = 15),
Blur(),
A.ChannelShuffle()
])
val_tr = A.Compose([
#A.CenterCrop(128,128),
#A.Resize(32,100)
])
model = model.Classifier({'lr':3e-4,'batch_size':64,'train_tr':train_tr,'val_tr':val_tr})
trainer.fit(model)
trainer.test(model)
import model
model = model.Classifier({'lr':3e-4,'batch_size':32,'train_tr':train_tr,'val_tr':val_tr})
ckpt = torch.load('lightning_logs/version_65/checkpoints/epoch=99-step=68799.ckpt') # PATH OF CHECKPOINT 1
model.load_state_dict(ckpt['state_dict'])
trainer.test(model)
out = trainer.predict(model)
# SHOULD SHOW A LOSS AROUND 0.000930169 AND AN MSE OF AROUND 66.6572494. IF NOT, SOMETHING WASN'T DONE CORRECTLY
# IF THE LOSS ISN'T EXACTLY 0.000930169 THE LB-SCORE WILL DIFFER SLIGHTLY
ckpt = torch.load('lightning_logs/version_66/checkpoints/epoch=97-step=67423.ckpt') # PATH OF CHECKPOINT 2
model.load_state_dict(ckpt['state_dict'])
trainer.test(model)
out2 = trainer.predict(model)
# SHOULD SHOW A LOSS AROUND 0.000918057 AND AN MSE OF UNDER 50. IF NOT, SOMETHING WASN'T DONE CORRECTLY
# IF THE LOSS ISN'T EXACTLY 0.000918057 THE LB-SCORE WILL DIFFER SLIGHTLY
def removeBatches(t):
    # Concatenate the list of per-batch prediction tensors into one tensor.
    concat = torch.tensor([])
for i in range(len(t)):
concat = torch.cat((concat,torch.tensor(t[i])))
return concat
import model
out = removeBatches(out)
out2 = removeBatches(out2)
out = ((out+out2)/2)
concat = model.toNum(out).squeeze()
import model
model.writeSub(concat)
!aicrowd submission create -c f1-speed-recognition -f submission.csv
```
# Joining Data Sets
Let us understand how to join multiple Data Sets using Spark based APIs.
## Prepare Datasets for Joins
Let us prepare the datasets to join so that we can get the details related to airports (origin and destination).
* Make sure airport-codes is in HDFS.
```
%%sh
hdfs dfs -ls /public/airlines_all/airport-codes
```
## Starting Spark Context
Let us start spark context for this Notebook so that we can execute the code provided.
```
from pyspark.sql import SparkSession
spark = SparkSession. \
builder. \
config('spark.ui.port', '0'). \
appName('Joining Data Sets'). \
master('yarn'). \
getOrCreate()
spark.conf.set('spark.sql.shuffle.partitions', '2')
```
* Analyze the dataset to confirm whether it has a header and how the data is structured.
```
spark.read. \
text("/public/airlines_all/airport-codes"). \
show(truncate=False)
```
* The data is tab separated.
* The data set has a header.
* The dataset has 4 fields - **Country, State, City, IATA**
Create a DataFrame `airport_codes`, applying an appropriate schema (a sketch of specifying the schema explicitly follows the next cell).
```
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
)
```
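The cell above relies on `inferSchema`. As a rough illustrative sketch (not part of the original notebook, and assuming the field order listed above), the schema could instead be specified explicitly:
```
# Sketch: define the schema explicitly instead of relying on inferSchema.
from pyspark.sql.types import StructType, StructField, StringType

# Assumed field order - adjust if the actual file differs.
airport_codes_schema = StructType([
    StructField("Country", StringType()),
    StructField("State", StringType()),
    StructField("City", StringType()),
    StructField("IATA", StringType())
])

airport_codes = spark. \
    read. \
    csv(airport_codes_path,
        sep="\t",
        header=True,
        schema=airport_codes_schema
       )
```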
* Preview and Understand the data.
```
airport_codes.show()
```
* Get schema of **airport_codes**.
```
airport_codes.printSchema()
```
* Preview the data
* Get the count of records
```
airport_codes.count()
```
* Get the count of unique records and see if it is the same as the total count.
```
airport_codes. \
select("IATA"). \
distinct(). \
count()
```
* If they are not equal, analyze the data and identify IATA codes which are repeated more than once.
```
from pyspark.sql.functions import lit, count
duplicate_iata_count = airport_codes. \
groupBy("IATA"). \
agg(count(lit(1)).alias("iata_count")). \
filter("iata_count > 1")
duplicate_iata_count.show()
```
* Deal with the duplicates by keeping the most appropriate record and discarding the others.
```
airport_codes. \
filter("IATA = 'Big'"). \
show()
airport_codes. \
filter("!(State = 'Hawaii' AND IATA = 'Big')"). \
show()
```
* Get the number of airports (IATA codes) for each state in the US. Sort the data in descending order by count.
```
from pyspark.sql.functions import col, lit, count
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big') AND Country='USA'")
airport_count_per_state = airport_codes. \
groupBy("Country", "State"). \
agg(count(lit(1)).alias("IATACount")). \
orderBy(col("IATACount").desc())
airport_count_per_state.show()
```
## Joining Data Frames
Let us understand how to join Data Frames by working through some problem statements, using January 2008 data.
* Get the number of flights departed from each US airport.
* Get the number of flights departed from each state.
* Get the list of airports in the US from which no flights departed.
* Check if there are any origins in the airlines data which do not have a record in airport-codes.
* Get the total number of flights from the airports that do not have entries in airport-codes.
* Get the total number of flights per airport for airports that do not have entries in airport-codes.
## Overview of Joins
Let us get an overview of joining Data Frames.
* Our data typically cannot be stored in one table. It will be stored in multiple tables, and the tables might be related.
* When it comes to transactional systems, we typically define tables based on Normalization Principles.
* When it comes to data warehousing applications, we typically define tables using Dimensional Modeling.
* With either approach, the data is scattered across multiple tables and relationships are defined between them.
* Typically tables are related with one-to-one, one-to-many, or many-to-many relationships.
* When we have 2 Data Sets that are related based on a common key, we typically perform a join.
* There are different types of joins (a small sketch follows this list).
  * INNER JOIN
  * OUTER JOIN (LEFT or RIGHT)
  * FULL OUTER JOIN (a LEFT OUTER JOIN b UNION a RIGHT OUTER JOIN b)
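A minimal illustrative sketch (not from the original notebook) of these join types on two tiny Data Frames, reusing the `spark` session created above; the column names and values are made up for the example:
```
# Two small Data Frames that share the join key `id`.
emp = spark.createDataFrame([(1, 'Alice'), (2, 'Bob'), (3, 'Cathy')], ['id', 'name'])
dept = spark.createDataFrame([(1, 'Sales'), (2, 'HR'), (4, 'IT')], ['id', 'dept'])

emp.join(dept, 'id', 'inner').show()   # only ids present in both Data Frames
emp.join(dept, 'id', 'left').show()    # all rows from emp, nulls where there is no match
emp.join(dept, 'id', 'right').show()   # all rows from dept
emp.join(dept, 'id', 'full').show()    # union of the left and right outer joins
```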
## Solutions - Problem 1
Get the number of flights departed from each US airport.
```
from pyspark.sql.functions import col, lit, count
airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
airlines = spark. \
read. \
parquet(airlines_path)
airlines.show()
airlines.select("Year", "Month", "DayOfMonth", "Origin", "Dest", "CRSDepTime").show()
airlines.count()
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big') AND Country='USA'")
airport_codes.count()
airlines. \
join(airport_codes, col("IATA") == col("Origin")). \
select("Year", "Month", "DayOfMonth", airport_codes["*"], "CRSDepTime"). \
show()
airlines. \
join(airport_codes, airport_codes.IATA == airlines["Origin"]). \
select("Year", "Month", "DayOfMonth", airport_codes["*"], "CRSDepTime"). \
show()
airlines.join?
flight_count_per_airport = airlines. \
join(airport_codes, airport_codes.IATA == airlines.Origin). \
groupBy("Origin"). \
agg(count(lit(1)).alias("FlightCount")). \
orderBy(col("FlightCount").desc())
flight_count_per_airport.show()
```
## Solutions - Problem 2
Get the number of flights departed from each state.
```
from pyspark.sql.functions import col, lit, count
airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
airlines = spark. \
read. \
parquet(airlines_path)
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big') AND Country='USA'")
flight_count_per_state = airlines. \
join(airport_codes, airport_codes.IATA == airlines.Origin). \
groupBy("State"). \
agg(count(lit(1)).alias("FlightCount")). \
orderBy(col("FlightCount").desc())
flight_count_per_state.show()
```
## Solutions - Problem 3
Get the list of airports in the US from which no flights departed.
```
airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
airlines = spark. \
read. \
parquet(airlines_path)
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big') AND Country='USA'")
airport_codes.printSchema()
airports_not_used = airport_codes. \
join(airlines, airport_codes.IATA == airlines.Origin, "left"). \
select(airport_codes["*"], "Year", "Month",
"DayOfMonth", "Origin", "CRSDepTime"). \
show()
airports_not_used = airport_codes. \
join(airlines, airport_codes.IATA == airlines.Origin, "left"). \
filter(airlines.Origin.isNull()). \
select('City', 'State', 'Country', 'IATA')
airports_not_used = airlines. \
join(airport_codes, airport_codes.IATA == airlines.Origin, "right"). \
filter("Origin IS NULL"). \
select('City', 'State', 'Country', 'IATA')
airports_not_used.count()
airport_codes.show()
```
## Solutions - Problem 4
Check if there are any origins in the airlines data which do not have a record in airport-codes.
```
airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
airlines = spark. \
read. \
parquet(airlines_path)
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big')")
airlines. \
join(airport_codes, airlines.Origin == airport_codes.IATA, "left"). \
filter("IATA IS NULL"). \
select("Origin"). \
distinct(). \
show()
airlines. \
join(airport_codes, airlines.Origin == airport_codes.IATA, "left"). \
filter("IATA IS NULL"). \
select("Origin"). \
distinct(). \
count()
```
## Solutions - Problem 5
Get the total number of flights from the airports that do not have entries in airport-codes.
```
airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
airlines = spark. \
read. \
parquet(airlines_path)
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big')")
airlines. \
join(airport_codes, airlines.Origin == airport_codes.IATA, "left"). \
filter("IATA IS NULL"). \
count()
```
## Solutions - Problem 6
Get the total number of flights per airport for airports that do not have entries in airport-codes.
```
airlines_path = "/public/airlines_all/airlines-part/flightmonth=200801"
airlines = spark. \
read. \
parquet(airlines_path)
airport_codes_path = "/public/airlines_all/airport-codes"
airport_codes = spark. \
read. \
csv(airport_codes_path,
sep="\t",
header=True,
inferSchema=True
). \
filter("!(State = 'Hawaii' AND IATA = 'Big')")
airlines. \
join(airport_codes, airlines.Origin == airport_codes.IATA, "left"). \
filter("IATA IS NULL"). \
groupBy("Origin"). \
count(). \
show()
```
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/AssetManagement/export_vector.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/AssetManagement/export_vector.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=AssetManagement/export_vector.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/AssetManagement/export_vector.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
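The original script cell for this section is not included here. As a rough placeholder (an assumption based on the notebook's title, not the author's actual script), exporting a vector `FeatureCollection` with the Earth Engine batch API could look like this:
```
# Hypothetical sketch: export a FeatureCollection (vector data) to Google Drive.
# The asset ID and description are placeholders, not from the original notebook.
fc = ee.FeatureCollection('TIGER/2018/States')
task = ee.batch.Export.table.toDrive(
    collection=fc,
    description='export_vector_example',
    fileFormat='SHP'
)
task.start()
```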
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
# Create Azure and Batch AI Resources
In this notebook we will create the necessary resources to train a [ResNet50](https://arxiv.org/abs/1512.03385) model in a distributed fashion using [Horovod](https://github.com/uber/horovod) on the ImageNet dataset. If you plan on using fake data, the sections marked optional can be skipped. This notebook will take you through the following steps:
* [Create Azure Resources](#azure_resources)
* [Create Fileserver(NFS)](#create_fileshare)
* [Upload Data to Blob (Optional)](#upload_data)
* [Configure Batch AI Cluster](#configure_cluster)
```
import sys
sys.path.append("common")
from dotenv import set_key
import os
import json
from utils import get_password, dotenv_for
from pathlib import Path
```
Below are the variables that describe our experiment. By default we are using NC24rs_v3 (Standard_NC24rs_v3) VMs, which have V100 GPUs and InfiniBand, and 2 nodes with 4 GPUs each, which equates to 8 GPUs in total. Feel free to increase the number of nodes, but be aware of any limits your subscription may have.
Set USE_FAKE to True if you want to use fake data rather than the ImageNet dataset. This is often a good way to debug your models as well as to check what the I/O overhead is.
```
# Variables for Batch AI - change as necessary
ID = "dtdemo"
GROUP_NAME = f"batch{ID}rg"
STORAGE_ACCOUNT_NAME = f"batch{ID}st"
FILE_SHARE_NAME = f"batch{ID}share"
SELECTED_SUBSCRIPTION = "<YOUR_SUBSCRIPTION>"
WORKSPACE = "workspace"
NUM_NODES = 2
CLUSTER_NAME = "msv100"
VM_SIZE = "Standard_NC24rs_v3"
GPU_TYPE = "V100"
PROCESSES_PER_NODE = 4
LOCATION = "eastus"
NFS_NAME = f"batch{ID}nfs"
USERNAME = "batchai_user"
USE_FAKE = False
DOCKERHUB = os.getenv('DOCKER_REPOSITORY', "masalvar")
DATA = Path("/data")
CONTAINER_NAME = f"batch{ID}container"
DOCKER_PWD = "<YOUR_DOCKER_PWD>"
dotenv_path = dotenv_for()
set_key(dotenv_path, 'DOCKER_PWD', DOCKER_PWD)
set_key(dotenv_path, 'GROUP_NAME', GROUP_NAME)
set_key(dotenv_path, 'FILE_SHARE_NAME', FILE_SHARE_NAME)
set_key(dotenv_path, 'WORKSPACE', WORKSPACE)
set_key(dotenv_path, 'NUM_NODES', str(NUM_NODES))
set_key(dotenv_path, 'CLUSTER_NAME', CLUSTER_NAME)
set_key(dotenv_path, 'GPU_TYPE', GPU_TYPE)
set_key(dotenv_path, 'PROCESSES_PER_NODE', str(PROCESSES_PER_NODE))
set_key(dotenv_path, 'STORAGE_ACCOUNT_NAME', STORAGE_ACCOUNT_NAME)
```
<a id='azure_resources'></a>
## Create Azure Resources
First we need to log in to our Azure account.
```
!az login -o table
```
If you have more than one Azure account you will need to select it with the command below. If you only have one account you can skip this step.
```
!az account set --subscription "$SELECTED_SUBSCRIPTION"
!az account list -o table
```
Next we create the group that will hold all our Azure resources.
```
!az group create -n $GROUP_NAME -l $LOCATION -o table
```
We will create the storage account that will hold our fileshare, where all the outputs from the jobs will be stored.
```
json_data = !az storage account create -l $LOCATION -n $STORAGE_ACCOUNT_NAME -g $GROUP_NAME --sku Standard_LRS
print('Storage account {} provisioning state: {}'.format(STORAGE_ACCOUNT_NAME,
json.loads(''.join(json_data))['provisioningState']))
json_data = !az storage account keys list -n $STORAGE_ACCOUNT_NAME -g $GROUP_NAME
storage_account_key = json.loads(''.join([i for i in json_data if 'WARNING' not in i]))[0]['value']
!az storage share create --account-name $STORAGE_ACCOUNT_NAME \
--account-key $storage_account_key --name $FILE_SHARE_NAME
!az storage directory create --share-name $FILE_SHARE_NAME --name scripts \
--account-name $STORAGE_ACCOUNT_NAME --account-key $storage_account_key
```
Here we are setting some defaults so that we don't have to keep adding them to every command.
```
!az configure --defaults location=$LOCATION
!az configure --defaults group=$GROUP_NAME
%env AZURE_STORAGE_ACCOUNT $STORAGE_ACCOUNT_NAME
%env AZURE_STORAGE_KEY=$storage_account_key
```
#### Create Workspace
Batch AI has the concept of workspaces and experiments. Below we will create the workspace for our work.
```
!az batchai workspace create -n $WORKSPACE -g $GROUP_NAME
```
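For completeness, experiments can be created inside the workspace in a similar way. The command below is an assumption about the Batch AI CLI and is not used later in this notebook:
```
# Hypothetical sketch (not required here): create an experiment in the workspace.
!az batchai experiment create -n myexperiment -w $WORKSPACE -g $GROUP_NAME
```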
<a id='upload_data'></a>
## Upload Data to Blob (Optional)
In this section we will create a blob container and upload the imagenet data we prepared locally in the previous notebook.
**You only need to run this section if you want to use real data. If USE_FAKE is set to False the commands below won't be executed.**
```
if USE_FAKE is False:
!az storage container create --account-name {STORAGE_ACCOUNT_NAME} \
--account-key {storage_account_key} \
--name {CONTAINER_NAME}
if USE_FAKE is False:
# Should take about 20 minutes
!azcopy --source {DATA/"train.tar.gz"} \
--destination https://{STORAGE_ACCOUNT_NAME}.blob.core.windows.net/{CONTAINER_NAME}/train.tar.gz \
--dest-key {storage_account_key} --quiet
if USE_FAKE is False:
!azcopy --source {DATA/"validation.tar.gz"} \
--destination https://{STORAGE_ACCOUNT_NAME}.blob.core.windows.net/{CONTAINER_NAME}/validation.tar.gz \
--dest-key {storage_account_key} --quiet
```
<a id='create_fileshare'></a>
## Create Fileserver
In this example we will store the data on an NFS fileshare. It is possible to use many storage solutions with Batch AI; NFS offers the best tradeoff between performance and ease of use. The best performance is achieved by loading the data locally, but this can be cumbersome since it requires that the data be downloaded by all the nodes, which with the ImageNet dataset can take hours. If you are using fake data we won't be using the fileserver, but we will create one anyway so that the server is ready if you want to run on the real ImageNet data later.
```
!az batchai file-server create -n $NFS_NAME --disk-count 4 --disk-size 250 -w $WORKSPACE \
-s Standard_DS4_v2 -u $USERNAME -p {get_password(dotenv_for())} -g $GROUP_NAME --storage-sku Premium_LRS
!az batchai file-server list -o table -w $WORKSPACE -g $GROUP_NAME
json_data = !az batchai file-server list -w $WORKSPACE -g $GROUP_NAME
nfs_ip=json.loads(''.join([i for i in json_data if 'WARNING' not in i]))[0]['mountSettings']['fileServerPublicIp']
```
After we have created the NFS share we need to copy the data to it. To do this we write the script below which will be executed on the fileserver. It installs a tool called azcopy and then downloads and extracts the data to the appropriate directory.
```
nodeprep_script = f"""
#!/usr/bin/env bash
wget https://gist.githubusercontent.com/msalvaris/073c28a9993d58498957294d20d74202/raw/87a78275879f7c9bb8d6fb9de8a2d2996bb66c24/install_azcopy
chmod 777 install_azcopy
sudo ./install_azcopy
mkdir -p /data/imagenet
azcopy --source https://{STORAGE_ACCOUNT_NAME}.blob.core.windows.net/{CONTAINER_NAME}/validation.tar.gz \
--destination /data/imagenet/validation.tar.gz\
--source-key {storage_account_key}\
--quiet
azcopy --source https://{STORAGE_ACCOUNT_NAME}.blob.core.windows.net/{CONTAINER_NAME}/train.tar.gz \
--destination /data/imagenet/train.tar.gz\
--source-key {storage_account_key}\
--quiet
cd /data/imagenet
tar -xzf train.tar.gz
tar -xzf validation.tar.gz
"""
with open('nodeprep.sh', 'w') as f:
f.write(nodeprep_script)
```
Next we will copy the file over and run it on the NFS VM. This will install azcopy and download and prepare the data.
```
if USE_FAKE:
raise Warning("You should not be running this section if you simply want to use fake data")
if USE_FAKE is False:
!sshpass -p {get_password(dotenv_for())} scp -o "StrictHostKeyChecking=no" nodeprep.sh $USERNAME@{nfs_ip}:~/
if USE_FAKE is False:
!sshpass -p {get_password(dotenv_for())} ssh -o "StrictHostKeyChecking=no" $USERNAME@{nfs_ip} "sudo chmod 777 ~/nodeprep.sh && ./nodeprep.sh"
```
<a id='configure_cluster'></a>
## Configure Batch AI Cluster
We then upload the scripts we wish to execute onto the fileshare. The fileshare will later be mounted by Batch AI. An alternative to uploading the scripts would be to embed them inside the Docker image.
```
!az storage file upload --share-name $FILE_SHARE_NAME --source HorovodPytorch/cluster_config/docker.service --path scripts
!az storage file upload --share-name $FILE_SHARE_NAME --source HorovodPytorch/cluster_config/nodeprep.sh --path scripts
```
Below is the command to create the cluster.
```
!az batchai cluster create \
-w $WORKSPACE \
--name $CLUSTER_NAME \
--image UbuntuLTS \
--vm-size $VM_SIZE \
--min $NUM_NODES --max $NUM_NODES \
--afs-name $FILE_SHARE_NAME \
--afs-mount-path extfs \
--user-name $USERNAME \
--password {get_password(dotenv_for())} \
--storage-account-name $STORAGE_ACCOUNT_NAME \
--storage-account-key $storage_account_key \
--nfs $NFS_NAME \
--nfs-mount-path nfs \
--config-file HorovodPytorch/cluster_config/cluster.json
```
Let's check that the cluster was created successfully.
```
!az batchai cluster show -n $CLUSTER_NAME -w $WORKSPACE
!az batchai cluster list -w $WORKSPACE -o table
!az batchai cluster node list -c $CLUSTER_NAME -w $WORKSPACE -o table
```
# Kiri Core Example: Zero-Shot Classification
## A brief intro
Zero-shot classification is a relatively simple idea.
As with standard classification, a model looks at input and assigns probabilities to a set of labels.
However, with zero-shot, the model was not trained on any particular set of labels.
This makes the classifier extremely flexible for a variety of use cases.
Kiri supports this in 100+ languages ([docs](https://kiri.readthedocs.io/en/latest/)).
```
# If you've got one, change it here.
api_key = None
from kiri import Kiri
if api_key:
kiri = Kiri(api_key=api_key)
else:
kiri = Kiri(local=True)
# An example set of labels, for classifying e.g. magazine articles.
topic_labels = ['Science', 'Art', 'Politics', 'Food/Cooking']
```
Initialise Kiri for multilingual classification with:
```python
kiri = Kiri(classification_model="multilingual")
```
```
# Some example articles for classification
# About the ISS, from Wikipedia
article_1 = """The International Space Station (ISS) is a modular space station (habitable artificial satellite) in low Earth orbit. It is a multinational collaborative project involving five participating space agencies: NASA (United States), Roscosmos (Russia), JAXA (Japan), ESA (Europe), and CSA (Canada). The ownership and use of the space station is established by intergovernmental treaties and agreements.[9] The station serves as a microgravity and space environment research laboratory in which scientific research is conducted in astrobiology, astronomy, meteorology, physics, and other fields. The ISS is suited for testing the spacecraft systems and equipment required for possible future long-duration missions to the Moon and Mars."""
# Intro to a soup recipe on the NYT
article_2 = """When there’s ground meat in the fridge and beans in the pantry, dinner almost cooks itself into a big pot of chili, the ingredients simmering together as if they had wills of their own. I make chili so often that not making it when everything is on hand feels like a betrayal of the muscle memory I’ve built up over many tomato-splattered years.
But as much as I adore a spicy bowl of chili, there are nights when I’d rather have soup. Especially when I’ve got a bright, vegetable-focused soup on my mind, one that’s filled with white beans and winter greens, spiked with ginger and red-pepper flakes, and rounded out with only a little ground turkey."""
# Brexit news, via Reuters
article_3 = """The pro-independence Scottish National Party (SNP) demanded on Sunday that Prime Minister Boris Johnson pay billions of pounds in compensation to Scotland for the mounting costs and disruption of Brexit. Brexit has strained the bonds that tie together the United Kingdom: England and Wales voted to leave but London, Northern Ireland and Scotland voted to stay.
The SNP, which wants independence for Scotland and is pushing for a second referendum, said Scottish fishermen faced grave disruption due to Brexit.
Johnson’s Conservatives “must apologise to Scottish businesses and pay compensation to Scotland for the long-term damage they are doing to our economy - costing us billions in lost trade and growth,” said Ian Blackford, the SNP’s leader in the British parliament.
Blackford cast Brexit as “an unnecessary act of economic vandalism, which has been inflicted against Scotland’s will”."""
results = kiri.classify(article_1, topic_labels)
label = max(results, key = results.get)
print(label)
results = kiri.classify(article_2, topic_labels)
label = max(results, key = results.get)
print(label)
results = kiri.classify(article_3, topic_labels)
label = max(results, key = results.get)
print(label)
```
```
from d2l import torch as d2l
import torch
from torch import nn
def dropout_layer(X, dropout):
assert 0 <= dropout <= 1
# In this case, all elements are dropped out
if dropout == 1:
return torch.zeros_like(X)
# In this case, all elements are kept
if dropout == 0:
return X
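    # Otherwise drop each element with probability `dropout` and rescale the survivors by
    # 1/(1 - dropout) so the expected value of the output matches the input (inverted dropout)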
mask = (torch.Tensor(X.shape).uniform_(0, 1) > dropout).float()
return mask * X / (1.0-dropout)
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
class Net(nn.Module):
def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
is_training = True):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.is_training = is_training
self.lin1 = nn.Linear(num_inputs, num_hiddens1)
self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
self.lin3 = nn.Linear(num_hiddens2, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
# Use dropout only when training the model
if self.is_training == True:
# Add a dropout layer after the first fully connected layer
H1 = dropout_layer(H1, dropout2)
H2 = self.relu(self.lin2(H1))
if self.is_training == True:
# Add a dropout layer after the second fully connected layer
H2 = dropout_layer(H2, dropout1)
out = self.lin3(H2)
return out
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
net = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256),
nn.ReLU(),
# Add a dropout layer after the first fully connected layer
nn.Dropout(dropout2),
nn.Linear(256, 256),
nn.ReLU(),
# Add a dropout layer after the second fully connected layer
nn.Dropout(dropout1),
nn.Linear(256, 10))
def init_weights(m):
if type(m) == nn.Linear:
torch.nn.init.normal_(m.weight, std=0.01)
net.apply(init_weights)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
num_epochs = 20
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
net_nodrop = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256),
nn.ReLU(),
# Add a dropout layer after the first fully connected layer
nn.Dropout(0),
nn.Linear(256, 256),
nn.ReLU(),
# Add a dropout layer after the second fully connected layer
nn.Dropout(0),
nn.Linear(256, 10))
# Rebuild the optimizer for net_nodrop; the previous trainer still points at the old net's parameters
trainer = torch.optim.SGD(net_nodrop.parameters(), lr=lr)
d2l.train_ch3(net_nodrop, train_iter, test_iter, loss, num_epochs, trainer)
```
|
github_jupyter
|
from d2l import torch as d2l
import torch
from torch import nn
def dropout_layer(X, dropout):
assert 0 <= dropout <= 1
# In this case, all elements are dropped out
if dropout == 1:
return torch.zeros_like(X)
# In this case, all elements are kept
if dropout == 0:
return X
mask = (torch.Tensor(X.shape).uniform_(0, 1) > dropout).float()
return mask * X / (1.0-dropout)
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
class Net(nn.Module):
def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
is_training = True):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.is_training = is_training
self.lin1 = nn.Linear(num_inputs, num_hiddens1)
self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
self.lin3 = nn.Linear(num_hiddens2, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
# Use dropout only when training the model
if self.is_training == True:
# Add a dropout layer after the first fully connected layer
H1 = dropout_layer(H1, dropout2)
H2 = self.relu(self.lin2(H1))
if self.is_training == True:
# Add a dropout layer after the second fully connected layer
H2 = dropout_layer(H2, dropout1)
out = self.lin3(H2)
return out
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
net = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256),
nn.ReLU(),
# Add a dropout layer after the first fully connected layer
nn.Dropout(dropout2),
nn.Linear(256, 256),
nn.ReLU(),
# Add a dropout layer after the second fully connected layer
nn.Dropout(dropout1),
nn.Linear(256, 10))
def init_weights(m):
if type(m) == nn.Linear:
torch.nn.init.normal_(m.weight, std=0.01)
net.apply(init_weights)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
num_epochs = 20
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
net_nodrop = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256),
nn.ReLU(),
# Add a dropout layer after the first fully connected layer
nn.Dropout(0),
nn.Linear(256, 256),
nn.ReLU(),
# Add a dropout layer after the second fully connected layer
nn.Dropout(0),
nn.Linear(256, 10))
d2l.train_ch3(net_nodrop, train_iter, test_iter, loss, num_epochs, trainer)
| 0.954137 | 0.721596 |
```
from switss import benchmarks as bm
from switss.model import DTMC, MDP, ReachabilityForm
from switss.problem import QSHeur, MILPExact, InverseReachabilityInitializer, InverseFrequencyInitializer
import matplotlib.pyplot as plt
```
# Crowds-2-8
```
M = DTMC.from_prism_model("datasets/crowds.pm",
prism_constants={("TotalRuns", 8), ("CrowdSize", 2)},
extra_labels={("target","observe0>1")})
rf,_,_ = ReachabilityForm.reduce(M, "init", "target")
```
## Standard Initializer
```
qsheur = QSHeur(iterations=5,solver="cbc")
data_min, data_max = bm.run(rf, [qsheur, qsheur],
["min","max"], from_thr=0.01,
to_thr=0.5, step=0.01, debug=False)
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_max, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_min, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_max, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_min, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
```
## Inverse Reachability Initializer
```
qsheur = QSHeur(iterations=5,initializertype=InverseReachabilityInitializer, solver="cbc")
data_min, data_max = bm.run(rf, [qsheur, qsheur],
["min", "max"], from_thr=0.01, to_thr=0.5,
step=0.01, debug=False)
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_max, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_min, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_max, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_min, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
```
## Inverse Expected Frequency Initializer
```
qsheur = QSHeur(iterations=5,initializertype=InverseFrequencyInitializer, solver="cbc")
data_min, data_max = bm.run(rf, [qsheur, qsheur],
["min", "max"], from_thr=0.01,
to_thr=0.5, step=0.01, debug=False)
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_max, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_min, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_max, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_min, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
```
# Consensus-2-4
```
M = MDP.from_file("datasets/consensus-2-4.lab", "datasets/consensus-2-4.tra")
rf,_,_ = ReachabilityForm.reduce(M, "init", "finished")
qsheur = QSHeur(iterations=2,solver="cbc")
milpexact = MILPExact(solver="cbc")
methods = [qsheur, qsheur, milpexact]
results = bm.run(rf, methods, ["min", "max", "min"], from_thr=0.1, to_thr=1, step=0.1, debug=False)
data_qsmin, data_qsmax, data_milpmin = results
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,6))
bm.render(data_qsmax, mode="laststates-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_qsmin, mode="laststates-thr", ax=ax2, title="QSHeur/MILP Min-Form States vs. Threshold")
bm.render(data_milpmin, mode="laststates-thr", ax=ax2)
```
# CSMA-3-2
```
M = MDP.from_file("datasets/csma-3-2.lab", "datasets/csma-3-2.tra")
rf,_,_ = ReachabilityForm.reduce(M, "init", "all_delivered")
qsheur = QSHeur(iterations=2,solver="cbc")
methods = [qsheur, qsheur]
results = bm.run(rf, methods, ["min", "max"], from_thr=0.1, to_thr=1, step=0.1, debug=False)
data_qsmin, data_qsmax = results
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_qsmax, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_qsmin, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_qsmax, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_qsmin, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
```
|
github_jupyter
|
from switss import benchmarks as bm
from switss.model import DTMC, MDP, ReachabilityForm
from switss.problem import QSHeur, MILPExact, InverseReachabilityInitializer, InverseFrequencyInitializer
import matplotlib.pyplot as plt
M = DTMC.from_prism_model("datasets/crowds.pm",
prism_constants={("TotalRuns", 8), ("CrowdSize", 2)},
extra_labels={("target","observe0>1")})
rf,_,_ = ReachabilityForm.reduce(M, "init", "target")
qsheur = QSHeur(iterations=5,solver="cbc")
data_min, data_max = bm.run(rf, [qsheur, qsheur],
["min","max"], from_thr=0.01,
to_thr=0.5, step=0.01, debug=False)
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_max, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_min, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_max, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_min, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
qsheur = QSHeur(iterations=5,initializertype=InverseReachabilityInitializer, solver="cbc")
data_min, data_max = bm.run(rf, [qsheur, qsheur],
["min", "max"], from_thr=0.01, to_thr=0.5,
step=0.01, debug=False)
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_max, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_min, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_max, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_min, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
qsheur = QSHeur(iterations=5,initializertype=InverseFrequencyInitializer, solver="cbc")
data_min, data_max = bm.run(rf, [qsheur, qsheur],
["min", "max"], from_thr=0.01,
to_thr=0.5, step=0.01, debug=False)
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_max, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_min, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_max, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_min, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
M = MDP.from_file("datasets/consensus-2-4.lab", "datasets/consensus-2-4.tra")
rf,_,_ = ReachabilityForm.reduce(M, "init", "finished")
qsheur = QSHeur(iterations=2,solver="cbc")
milpexact = MILPExact(solver="cbc")
methods = [qsheur, qsheur, milpexact]
results = bm.run(rf, methods, ["min", "max", "min"], from_thr=0.1, to_thr=1, step=0.1, debug=False)
data_qsmin, data_qsmax, data_milpmin = results
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,6))
bm.render(data_qsmax, mode="laststates-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_qsmin, mode="laststates-thr", ax=ax2, title="QSHeur/MILP Min-Form States vs. Threshold")
bm.render(data_milpmin, mode="laststates-thr", ax=ax2)
M = MDP.from_file("datasets/csma-3-2.lab", "datasets/csma-3-2.tra")
rf,_,_ = ReachabilityForm.reduce(M, "init", "all_delivered")
qsheur = QSHeur(iterations=2,solver="cbc")
methods = [qsheur, qsheur]
results = bm.run(rf, methods, ["min", "max"], from_thr=0.1, to_thr=1, step=0.1, debug=False)
data_qsmin, data_qsmax = results
fig, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(14,12))
bm.render(data_qsmax, mode="states-thr", ax=ax1, title="QSHeur Max-Form States vs. Threshold")
bm.render(data_qsmin, mode="states-thr", ax=ax2, title="QSHeur Min-Form States vs. Threshold")
bm.render(data_qsmax, mode="proc_time-thr", ax=ax3, title="QSHeur Max-Form Time vs. Threshold")
bm.render(data_qsmin, mode="proc_time-thr", ax=ax4, title="QSHeur Min-Form Time vs. Threshold")
| 0.730963 | 0.672076 |
# General Relativity - Tensor Calculator
## Compute Riemann Tensors, Ricci Tensors, Christoffel Symbols, and the Ricci Scalar
### A simple, easy-to-use Jupyter Notebook to help compute Christoffel symbols, the Riemann and Ricci Tensors, and the Ricci Scalar using SymPy with a given spacetime metric in 3 + 1 dimensions
By: Emaad Paracha
## Instructions:
The first two sections (Sections 0 and 1) of this notebook are "configuration" modules, while the last section (Section 2) is the "computation" module.<br/>
Section 0 imports the dependencies. <br/>
Section 1 is where all the functions are defined that compute the matrices for Christoffel symbols, and the Reimann and Ricci tensors, along with functions that would give individual values given specific indices. <b> <i> Normally you do not need to edit this section unless you want to play around with the code. I have commented some parts for ease of understanding. </i> </b> <br/>
Section 2 is the computation section, this is where you edit and input your metric, the symbols (variables) that your metric depends on (in order), and the initial orientation of your metric (whether you have $g^{\mu \nu}$ or $g_{\mu \nu}$).
## 0. Importing Dependencies
```
from sympy import *
from sympy.matrices import Matrix, zeros
from sympy import symbols
from sympy import Function, Symbol, Derivative
from IPython.display import display, Latex
init_printing(use_unicode=True)
```
## 1. Defining Functions
```
orientationError = 'ERROR: Please enter either "down" or "up" for the orientation of your metric'
indexError = 'ERROR: Please ensure your index values are integers between 0 and 3'
#Function to compute all Christoffel symbols
def ChristoffelSymbols(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
#Having the right metric components corresponding to the upstairs or downstairs metric
if (orientation == "down"):
gdndn = g
gupup = gdndn.inv()
else:
gupup = g
gdndn = gupup.inv()
#Creating matrix for variables
dava = Matrix(sym)
#Creating empty Gamma matrix
Gamma = Matrix([[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]])
#Compute Christoffels
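        #Implements Gamma^m_{n l} = (1/2) g^{m s} (d_n g_{s l} + d_l g_{n s} - d_s g_{n l}),
        #accumulated over the repeated index s by the outermost loop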
for s in range(4):
for m in range(4):
for n in range(4):
for l in range(4):
Gamma[(m,n)][l] += ((1/2)*gupup[(m,s)])*((diff(gdndn[(s,l)],dava[n]))
+ (diff(gdndn[(n,s)],dava[l]))
- (diff(gdndn[(n,l)],dava[s])))
return simplify(Gamma)
#If we just need a specific Christoffel symbol
def GiveChristoffel(mu,nu,lamb,metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
if (isinstance(mu, int) and isinstance(nu, int) and isinstance(lamb, int) and mu < 4 and mu >=0 and nu < 4 and nu >=0 and lamb < 4 and lamb >=0):
chris = ChristoffelSymbols(metricpack)
return chris[(mu,nu)][lamb]
else:
print(indexError)
#Computing all Reimann Tensors
def ReimannTensor(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
#Christoffels
Gamma = ChristoffelSymbols(metricpack)
#Creating matrix for variables
dava = Matrix(sym)
#Empty Reimann Matrix
Reimann = Matrix([[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],
[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],
[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],
[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]]])
#Computing the Reimanns
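        #Implements R^p_{s m n} = d_m Gamma^p_{n s} - d_n Gamma^p_{m s}
        #                         + Gamma^p_{m l} Gamma^l_{n s} - Gamma^p_{n l} Gamma^l_{m s}
        #(derivative terms first, then the quadratic terms accumulated in the second set of loops)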
for p in range(4):
for s in range(4):
for m in range(4):
for n in range(4):
Reimann[(p,s)][m][n] = (diff(Gamma[(p,s)][n],dava[m]) - diff(Gamma[(p,s)][m],dava[n]))
l = 0
p = 0
s = 0
m = 0
n = 0
for l in range(4):
for p in range(4):
for s in range(4):
for m in range(4):
for n in range(4):
Reimann[(p,s)][m][n] += (Gamma[(l,s)][n]*Gamma[(p,l)][m] - Gamma[(l,s)][m]*Gamma[(p,l)][n])
return simplify(Reimann)
#If we just need a specific Reimann tensor
def GiveReimann(pho,sigma,mu,nu,metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
if (isinstance(mu, int) and isinstance(nu, int) and isinstance(pho, int) and isinstance(sigma, int) and mu < 4 and mu >=0 and nu < 4 and nu >=0 and sigma < 4 and sigma >=0 and pho < 4 and pho >= 0):
rem = ReimannTensor(metricpack)
return rem[(pho,sigma)][mu][nu]
else:
print(indexError)
#Compute Ricci Tensor
def RicciTensor(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
remm = ReimannTensor(metricpack)
Ricci = Matrix([[0,0,0,0],[0,0,0,0],
[0,0,0,0],[0,0,0,0]])
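        #Contract the Riemann tensor on its first (upper) and last (lower) index:
        #Ricci_{y z} = sum_x R^x_{y z x}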
for x in range(4):
for y in range(4):
for z in range(4):
Ricci[y,z] += remm[x,y][z][x]
return simplify(Ricci)
#If we just need a specific Ricci
def GiveRicci(mu,nu,metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
ricci = RicciTensor(metricpack)
return simplify(ricci[mu,nu])
#To compute Ricci Scalar
def RicciScalar(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
#Having the right metric components corresponding to the upstairs or downstairs metric
if (orientation == "down"):
gdndn = g
gupup = gdndn.inv()
else:
gupup = g
ricciscalare = 0
ricciten = RicciTensor(metricpack)
        #Contract with the inverse metric over both indices, R = g^{i j} R_{i j}
        #(summing over all index pairs also handles non-diagonal metrics)
        for i in range(4):
            for j in range(4):
                ricciscalare += gupup[i,j]*ricciten[i,j]
return simplify(ricciscalare)
```
## 2. Computation
<b>Instructions:</b> This is the section where you input your metric, the symbols (variables) that your metric depends on (in order), and the initial orientation of your metric (whether you have $g^{\mu \nu}$ or $g_{\mu \nu}$)
This example is for a metric with the following line element:
$$ ds^{2} = dt^{2} - t^{2}dr^{2} - t^{2}r^{2}d\theta^{2} - t^{2}r^{2}sin^{2}\theta d\phi^{2} $$
For ease of computations, I have assigned the following symbols to the variables the metric depends on:
$$ t = t $$
$$ r = r $$
$$ h = \theta $$
$$ p = \phi $$
These symbols are accordingly edited in the two lines commented with "Edit symbols here"
Next, enter the metric tensor in a matrix, labelled $\texttt{gtensor}$ below, of the form:
$$ \begin{bmatrix}
g_{00} & g_{01} & g_{02} & g_{03} \\
g_{10} & g_{11} & g_{12} & g_{13} \\
g_{20} & g_{21} & g_{22} & g_{23} \\
g_{30} & g_{31} & g_{32} & g_{33}
\end{bmatrix} $$
for $g_{\mu \nu}$ or similarly in the same format for $g^{\mu \nu}$.
Lastly, for the metric you entered, please denote whether the indices are "up" or "down", for example if your metric was $g_{\mu \nu}$, enter "down" for the orientation, and if your metric was $g^{\mu \nu}$, enter "up" for your orientation.
The code then packs these three items, the tensor, variables, and orientation, into a list, denoted by $\texttt{metric}$, and you have a list of functions at your disposal to compute either the Riemann or Ricci tensors, the Ricci Scalar, or Christoffel symbols.
To view your metric in a matrix form, call $\texttt{gtensor}$.
To view all possible Christoffel symbols in a 4x4x4 matrix, use $\texttt{ChristoffelSymbols(metric)}$.
To view a specific Christoffel symbol, i.e. $\Gamma^{3}_{23}$, use $\texttt{GiveChristoffel(3,2,3,metric)}$, where the first number, 3, is the upper index and 2 and 3 are the two lower indices in order.
To view the Riemann tensor in a 4x4x4x4 matrix, use $\texttt{ReimannTensor(metric)}$.
To view a specific component of the Riemann tensor, i.e. $R^{1}_{313}$, use $\texttt{GiveReimann(1,3,1,3,metric)}$, where the first number, 1, is the upper index and 3, 1, and 3 are the three lower indices in order.
To view the Ricci tensor in a 4x4 matrix, use $\texttt{RicciTensor(metric)}$.
To view a specific component of the Ricci tensor, i.e. $R_{33}$, use $\texttt{GiveRicci(3,3,metric)}$, where the numbers correspond to the two lower indices in order.
Lastly, to compute the Ricci scalar for the metric, simply use $\texttt{RicciScalar(metric)}$ to get the Ricci scalar.
All examples have been computed below.
```
t, r, h, p = symbols('t r h p') #Edit symbols here
variables = [t,r,h,p] #Edit symbols here
gtensor = Matrix([[1,0,0,0], #Corresponding to g00, g01, g02, g03
[0,-t**2,0,0], #Corresponding to g10, g11, g12, g13
[0,0,(-t**2)*r**2,0], #Corresponding to g20, g21, g22, g23
[0,0,0,(-t**2)*(r**2)*(sin(h)**2)]]) #Corresponding to g30, g31, g32, g33
orientation = "down" #Orientation of the gtensor, whether it is g_mu_nu ("down") or g^mu^nu ("up")
metric = [gtensor,variables,orientation] #List of the gtensor, variables and orientation to be used
#Display the metric tensor:
gtensor
#Display a matrix of all possible Christoffel Symbols:
ChristoffelSymbols(metric)
#Compute an example Christoffel, here we are computing Γ^(3)_(2,3)
GiveChristoffel(3,2,3,metric)
#Display the Reimann Tensor:
ReimannTensor(metric)
#Compute a specific Reimann, here we are computing R^(1)_(3,1,3)
GiveReimann(1,3,1,3,metric)
#Display the Ricci Tensor:
RicciTensor(metric)
#Compute a specific Ricci, here we are computing R_(3,3)
GiveRicci(3,3,metric)
#Display the Ricci Scalar:
RicciScalar(metric)
```
|
github_jupyter
|
from sympy import *
from sympy.matrices import Matrix, zeros
from sympy import symbols
from sympy import Function, Symbol, Derivative
from IPython.display import display, Latex
init_printing(use_unicode=True)
orientationError = 'ERROR: Please enter either "down" or "up" for the orientation of your metric'
indexError = 'ERROR: Please ensure your index values are integers between 0 and 3'
#Function to compute all Christoffel symbols
def ChristoffelSymbols(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
#Having the right metric components corresponding to the upstairs or downstairs metric
if (orientation == "down"):
gdndn = g
gupup = gdndn.inv()
else:
gupup = g
gdndn = gupup.inv()
#Creating matrix for variables
dava = Matrix(sym)
#Creating empty Gamma matrix
Gamma = Matrix([[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]])
#Compute Christoffels
for s in range(4):
for m in range(4):
for n in range(4):
for l in range(4):
Gamma[(m,n)][l] += ((1/2)*gupup[(m,s)])*((diff(gdndn[(s,l)],dava[n]))
+ (diff(gdndn[(n,s)],dava[l]))
- (diff(gdndn[(n,l)],dava[s])))
return simplify(Gamma)
#If we just need a specific Christoffel symbol
def GiveChristoffel(mu,nu,lamb,metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
if (isinstance(mu, int) and isinstance(nu, int) and isinstance(lamb, int) and mu < 4 and mu >=0 and nu < 4 and nu >=0 and lamb < 4 and lamb >=0):
chris = ChristoffelSymbols(metricpack)
return chris[(mu,nu)][lamb]
else:
print(indexError)
#Computing all Reimann Tensors
def ReimannTensor(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
#Christoffels
Gamma = ChristoffelSymbols(metricpack)
#Creating matrix for variables
dava = Matrix(sym)
#Empty Reimann Matrix
Reimann = Matrix([[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],
[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],
[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]],
[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]]])
#Computing the Reimanns
for p in range(4):
for s in range(4):
for m in range(4):
for n in range(4):
Reimann[(p,s)][m][n] = (diff(Gamma[(p,s)][n],dava[m]) - diff(Gamma[(p,s)][m],dava[n]))
l = 0
p = 0
s = 0
m = 0
n = 0
for l in range(4):
for p in range(4):
for s in range(4):
for m in range(4):
for n in range(4):
Reimann[(p,s)][m][n] += (Gamma[(l,s)][n]*Gamma[(p,l)][m] - Gamma[(l,s)][m]*Gamma[(p,l)][n])
return simplify(Reimann)
#If we just need a specific Reimann tensor
def GiveReimann(pho,sigma,mu,nu,metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
if (isinstance(mu, int) and isinstance(nu, int) and isinstance(pho, int) and isinstance(sigma, int) and mu < 4 and mu >=0 and nu < 4 and nu >=0 and sigma < 4 and sigma >=0 and pho < 4 and pho >= 0):
rem = ReimannTensor(metricpack)
return rem[(pho,sigma)][mu][nu]
else:
print(indexError)
#Compute Ricci Tensor
def RicciTensor(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
remm = ReimannTensor(metricpack)
Ricci = Matrix([[0,0,0,0],[0,0,0,0],
[0,0,0,0],[0,0,0,0]])
for x in range(4):
for y in range(4):
for z in range(4):
Ricci[y,z] += remm[x,y][z][x]
return simplify(Ricci)
#If we just need a specific Ricci
def GiveRicci(mu,nu,metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
ricci = RicciTensor(metricpack)
return simplify(ricci[mu,nu])
#To compute Ricci Scalar
def RicciScalar(metricpack):
g, sym, orientation = metricpack
if (orientation != "down" and orientation != "up"):
#Ensuring the correct orientation values are inputted
print(orientationError)
else:
#Having the right metric components corresponding to the upstairs or downstairs metric
if (orientation == "down"):
gdndn = g
gupup = gdndn.inv()
else:
gupup = g
ricciscalare = 0
ricciten = RicciTensor(metricpack)
for i in range(4):
ricciscalare += gupup[i,i]*ricciten[i,i]
return simplify(ricciscalare)
t, r, h, p = symbols('t r h p') #Edit symbols here
variables = [t,r,h,p] #Edit symbols here
gtensor = Matrix([[1,0,0,0], #Corresponding to g00, g01, g02, g03
[0,-t**2,0,0], #Corresponding to g10, g11, g12, g13
[0,0,(-t**2)*r**2,0], #Corresponding to g20, g21, g22, g23
[0,0,0,(-t**2)*(r**2)*(sin(h)**2)]]) #Corresponding to g30, g31, g32, g33
orientation = "down" #Orientation of the gtensor, whether it is g_mu_nu ("down") or g^mu^nu ("up")
metric = [gtensor,variables,orientation] #List of the gtensor, variables and orientation to be used
#Display the metric tensor:
gtensor
#Display a matrix of all possible Christoffel Symbols:
ChristoffelSymbols(metric)
#Compute an example Christoffel, here we are computing Γ^(3)_(2,3)
GiveChristoffel(3,2,3,metric)
#Display the Reimann Tensor:
ReimannTensor(metric)
#Compute a specific Reimann, here we are computing R^(1)_(3,1,3)
GiveReimann(1,3,1,3,metric)
#Display the Ricci Tensor:
RicciTensor(metric)
#Compute a specific Ricci, here we are computing R_(3,3)
GiveRicci(3,3,metric)
#Display the Ricci Scalar:
RicciScalar(metric)
| 0.57069 | 0.906322 |
# Measures of Central Tendency
By Evgenia "Jenny" Nitishinskaya, Maxwell Margenot, and Delaney Mackenzie.
Part of the Quantopian Lecture Series:
* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
---
In this notebook we will discuss ways to summarize a set of data using a single number. The goal is to capture information about the distribution of data.
# Arithmetic mean
The arithmetic mean is used very frequently to summarize numerical data, and is usually the one assumed to be meant by the word "average." It is defined as the sum of the observations divided by the number of observations:
$$\mu = \frac{\sum_{i=1}^N X_i}{N}$$
where $X_1, X_2, \ldots , X_N$ are our observations.
```
# Two useful statistical libraries
import scipy.stats as stats
import numpy as np
# We'll use these two data sets as examples
x1 = [1, 2, 2, 3, 4, 5, 5, 7]
x2 = x1 + [100]
print 'Mean of x1:', sum(x1), '/', len(x1), '=', np.mean(x1)
print 'Mean of x2:', sum(x2), '/', len(x2), '=', np.mean(x2)
```
We can also define a <i>weighted</i> arithmetic mean, which is useful for explicitly specifying the number of times each observation should be counted. For instance, in computing the average value of a portfolio, it is more convenient to say that 70% of your stocks are of type X rather than making a list of every share you hold.
The weighted arithmetic mean is defined as
$$\sum_{i=1}^n w_i X_i $$
where $\sum_{i=1}^n w_i = 1$. In the usual arithmetic mean, we have $w_i = 1/n$ for all $i$.
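As a quick illustration (not part of the original lecture text), the weighted mean can be computed directly with `np.average`; the 70/30 split and the returns below are made-up numbers chosen only for the example.
```
import numpy as np
# Hypothetical portfolio: 70% of capital in an asset returning 4%, 30% in one returning 10%
weights = np.asarray([0.7, 0.3])
asset_returns = np.asarray([0.04, 0.10])
# np.average computes sum(w_i * X_i) / sum(w_i); since the weights sum to 1, this is the weighted mean
np.average(asset_returns, weights=weights)  # 0.7*0.04 + 0.3*0.10 = 0.058
```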
# Median
The median of a set of data is the number which appears in the middle of the list when it is sorted in increasing or decreasing order. When we have an odd number $n$ of data points, this is simply the value in position $(n+1)/2$. When we have an even number of data points, the list splits in half and there is no item in the middle; so we define the median as the average of the values in positions $n/2$ and $(n+2)/2$.
The median is less affected by extreme values in the data than the arithmetic mean. It tells us the value that splits the data set in half, but not how much smaller or larger the other values are.
```
print 'Median of x1:', np.median(x1)
print 'Median of x2:', np.median(x2)
```
# Mode
The mode is the most frequently occurring value in a data set. It can be applied to non-numerical data, unlike the mean and the median. One situation in which it is useful is for data whose possible values are independent. For example, in the outcomes of a weighted die, coming up 6 often does not mean it is likely to come up 5; so knowing that the data set has a mode of 6 is more useful than knowing it has a mean of 4.5.
```
# Scipy has a built-in mode function, but it will return exactly one value
# even if two values occur the same number of times, or if no value appears more than once
print 'One mode of x1:', stats.mode(x1)[0][0]
# So we will write our own
def mode(l):
# Count the number of times each element appears in the list
counts = {}
for e in l:
if e in counts:
counts[e] += 1
else:
counts[e] = 1
# Return the elements that appear the most times
maxcount = 0
modes = {}
for (key, value) in counts.iteritems():
if value > maxcount:
maxcount = value
modes = {key}
elif value == maxcount:
modes.add(key)
if maxcount > 1 or len(l) == 1:
return list(modes)
return 'No mode'
print 'All of the modes of x1:', mode(x1)
```
For data that can take on many different values, such as returns data, there may not be any values that appear more than once. In this case we can bin values, like we do when constructing a histogram, and then find the mode of the data set where each value is replaced with the name of its bin. That is, we find which bin elements fall into most often.
```
# Get return data for an asset and compute the mode of the data set
start = '2014-01-01'
end = '2015-01-01'
pricing = get_pricing('SPY', fields='price', start_date=start, end_date=end)
returns = pricing.pct_change()[1:]
print 'Mode of returns:', mode(returns)
# Since all of the returns are distinct, we use a frequency distribution to get an alternative mode.
# np.histogram returns the frequency distribution over the bins as well as the endpoints of the bins
hist, bins = np.histogram(returns, 20) # Break data up into 20 bins
maxfreq = max(hist)
# Find all of the bins that are hit with frequency maxfreq, then print the intervals corresponding to them
print 'Mode of bins:', [(bins[i], bins[i+1]) for i, j in enumerate(hist) if j == maxfreq]
```
# Geometric mean
While the arithmetic mean averages using addition, the geometric mean uses multiplication:
$$ G = \sqrt[n]{X_1X_2\ldots X_n} $$
for observations $X_i \geq 0$. We can also rewrite it as an arithmetic mean using logarithms:
$$ \ln G = \frac{\sum_{i=1}^n \ln X_i}{n} $$
The geometric mean is always less than or equal to the arithmetic mean (when working with nonnegative observations), with equality only when all of the observations are the same.
```
# Use scipy's gmean function to compute the geometric mean
print 'Geometric mean of x1:', stats.gmean(x1)
print 'Geometric mean of x2:', stats.gmean(x2)
```
What if we want to compute the geometric mean when we have negative observations? This problem is easy to solve in the case of asset returns, where our values are always at least $-1$. We can add 1 to a return $R_t$ to get $1 + R_t$, which is the ratio of the price of the asset for two consecutive periods (as opposed to the percent change between the prices, $R_t$). This quantity will always be nonnegative. So we can compute the geometric mean return,
$$ R_G = \sqrt[T]{(1 + R_1)\ldots (1 + R_T)} - 1$$
```
# Add 1 to every value in the returns array and then compute R_G
ratios = returns + np.ones(len(returns))
R_G = stats.gmean(ratios) - 1
print 'Geometric mean of returns:', R_G
```
The geometric mean is defined so that if the rate of return over the whole time period were constant and equal to $R_G$, the final price of the security would be the same as in the case of returns $R_1, \ldots, R_T$.
```
T = len(returns)
init_price = pricing[0]
final_price = pricing[T]
print 'Initial price:', init_price
print 'Final price:', final_price
print 'Final price as computed with R_G:', init_price*(1 + R_G)**T
```
# Harmonic mean
The harmonic mean is less commonly used than the other types of means. It is defined as
$$ H = \frac{n}{\sum_{i=1}^n \frac{1}{X_i}} $$
As with the geometric mean, we can rewrite the harmonic mean to look like an arithmetic mean. The reciprocal of the harmonic mean is the arithmetic mean of the reciprocals of the observations:
$$ \frac{1}{H} = \frac{\sum_{i=1}^n \frac{1}{X_i}}{n} $$
The harmonic mean for nonnegative numbers $X_i$ is always at most the geometric mean (which is at most the arithmetic mean), and they are equal only when all of the observations are equal.
```
print 'Harmonic mean of x1:', stats.hmean(x1)
print 'Harmonic mean of x2:', stats.hmean(x2)
```
The harmonic mean can be used when the data can be naturally phrased in terms of ratios. For instance, in the dollar-cost averaging strategy, a fixed amount is spent on shares of a stock at regular intervals. The higher the price of the stock, then, the fewer shares an investor following this strategy buys. The average (arithmetic mean) amount they pay for the stock is the harmonic mean of the prices.
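As a small sketch (not from the original lecture), with three made-up prices and a fixed $100 spent at each one, the average price paid per share comes out to exactly the harmonic mean of the prices:
```
import numpy as np
import scipy.stats as stats
# Hypothetical dollar-cost averaging: spend $100 at each of three (made-up) prices
prices = np.array([8.0, 10.0, 12.5])
shares_bought = 100.0 / prices                        # shares obtained at each purchase
avg_price_paid = (3 * 100.0) / shares_bought.sum()    # total spent / total shares
# avg_price_paid equals the harmonic mean of the prices
avg_price_paid, stats.hmean(prices)
```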
# Point Estimates Can Be Deceiving
Means by nature hide a lot of information, as they collapse entire distributions into one number. As a result, 'point estimates', or metrics that use one number, can disguise large problems in your data. You should be careful to ensure that you are not losing key information by summarizing your data, and you should rarely, if ever, use a mean without also referring to a measure of spread.
## Underlying Distribution Can be Wrong
Even when you are using the right metrics for mean and spread, they can make no sense if your underlying distribution is not what you think it is. For instance, using standard deviation to measure the frequency of an event will usually assume normality. Try not to assume distributions unless you have to, in which case you should rigorously check that the data do fit the distribution you are assuming.
## References
* "Quantitative Investment Analysis", by DeFusco, McLeavey, Pinto, and Runkle
*This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
|
github_jupyter
|
# Two useful statistical libraries
import scipy.stats as stats
import numpy as np
# We'll use these two data sets as examples
x1 = [1, 2, 2, 3, 4, 5, 5, 7]
x2 = x1 + [100]
print 'Mean of x1:', sum(x1), '/', len(x1), '=', np.mean(x1)
print 'Mean of x2:', sum(x2), '/', len(x2), '=', np.mean(x2)
print 'Median of x1:', np.median(x1)
print 'Median of x2:', np.median(x2)
# Scipy has a built-in mode function, but it will return exactly one value
# even if two values occur the same number of times, or if no value appears more than once
print 'One mode of x1:', stats.mode(x1)[0][0]
# So we will write our own
def mode(l):
# Count the number of times each element appears in the list
counts = {}
for e in l:
if e in counts:
counts[e] += 1
else:
counts[e] = 1
# Return the elements that appear the most times
maxcount = 0
modes = {}
for (key, value) in counts.iteritems():
if value > maxcount:
maxcount = value
modes = {key}
elif value == maxcount:
modes.add(key)
if maxcount > 1 or len(l) == 1:
return list(modes)
return 'No mode'
print 'All of the modes of x1:', mode(x1)
# Get return data for an asset and compute the mode of the data set
start = '2014-01-01'
end = '2015-01-01'
pricing = get_pricing('SPY', fields='price', start_date=start, end_date=end)
returns = pricing.pct_change()[1:]
print 'Mode of returns:', mode(returns)
# Since all of the returns are distinct, we use a frequency distribution to get an alternative mode.
# np.histogram returns the frequency distribution over the bins as well as the endpoints of the bins
hist, bins = np.histogram(returns, 20) # Break data up into 20 bins
maxfreq = max(hist)
# Find all of the bins that are hit with frequency maxfreq, then print the intervals corresponding to them
print 'Mode of bins:', [(bins[i], bins[i+1]) for i, j in enumerate(hist) if j == maxfreq]
# Use scipy's gmean function to compute the geometric mean
print 'Geometric mean of x1:', stats.gmean(x1)
print 'Geometric mean of x2:', stats.gmean(x2)
# Add 1 to every value in the returns array and then compute R_G
ratios = returns + np.ones(len(returns))
R_G = stats.gmean(ratios) - 1
print 'Geometric mean of returns:', R_G
T = len(returns)
init_price = pricing[0]
final_price = pricing[T]
print 'Initial price:', init_price
print 'Final price:', final_price
print 'Final price as computed with R_G:', init_price*(1 + R_G)**T
print 'Harmonic mean of x1:', stats.hmean(x1)
print 'Harmonic mean of x2:', stats.hmean(x2)
| 0.746509 | 0.992795 |
```
import requests
from bs4 import BeautifulSoup
import time
import MySQLdb
import math
import json
import random
import re
# Model libraries
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import torchvision.utils as utils
import cv2
import time
import os
import copy
import warnings
import pdb;
import torch.nn.functional as F
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
import matplotlib.image as mpimg
from PIL import *
class Attention(nn.Module):
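    # Spatial attention block: a small conv stack maps the input features to a single-channel
    # map, a sigmoid turns it into attention weights, and the input is gated element-wise by them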
def __init__(self, in_features):
super(Attention, self).__init__()
self.op = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=64, kernel_size=3, padding=1),
nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3, padding=1),
nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1, padding=0))
def forward(self, l):
N, C, W, H = l.size()
c = self.op(l)
a = torch.sigmoid(c)
g = torch.mul(a.expand_as(l), l)
return a.view(N,1,W,H), g
class ModifyVGG(nn.Module):
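    # Pretrained VGG16 backbone with the Attention blocks above inserted after selected
    # convolutional stages; forward() returns the class scores plus the three attention maps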
def __init__(self, im_size, num_classes):
super(ModifyVGG,self).__init__()
vgg_model = models.vgg16(pretrained=True)
# everything before layer 4
self.before_att = nn.Sequential(*list(vgg_model.features.children())[:4])
# added layer
self.attention1 = Attention(in_features=64)
self.attention2 = Attention(in_features=128)
self.attention3 = Attention(in_features=256)
# everything after layer 4
self.after_att = nn.Sequential(*list(vgg_model.features.children())[4:])
self.avg_pool = vgg_model.avgpool
self.classifier = nn.Sequential(*list(vgg_model.classifier.children()))
def forward(self,x):
# run vgg before layer 4
x = self.before_att(x)
# run added layer
c1, x = self.attention1(x)
# run everything after layer 4 (as in the original vgg model)
x = self.after_att[:5](x)
c2, x = self.attention2(x)
x = self.after_att[5:12](x)
c3, x = self.attention3(x)
x = self.after_att[12:](x)
x = self.avg_pool(x)
x = x.view((x.shape[0],x.shape[1]*x.shape[2]*x.shape[3]))
x = self.classifier(x)
return x, c1, c2, c3
IMAGE_LOCATION = 'testing/'
def get_html_source(URL):
"""Docstring here."""
html_source = requests.get(URL).text
return BeautifulSoup(html_source, 'html.parser')
def image_name(last_name):
"""Docstring here."""
return '{}.jpg'.format(os.path.join(IMAGE_LOCATION, str(last_name+1)))
def validate(image_url):
"""Docstring here."""
return image_url.endswith('.jpg') and image_url.startswith('http')
def download_images(soup, current_name):
"""Docstring here."""
da = soup.findAll('table','fixed offers breakword redesigned')
prices, links = [], []
for p in da[0].findAll('table'):
for tabel in p.findAll('td','photo-cell'):
for t in tabel.findChildren("img"):
image_url = t.get('src')
image = requests.get(image_url)
if image.status_code == 200:
with open(image_name(current_name), 'wb') as f:
f.write(image.content)
for price in p.findAll('p','price'):
prices.append(price.get_text().strip()[:5].replace(" ",""))
for link in p.findAll('a','linkWithHash'):
links.append(link['href'])
break
current_name += 1
return current_name, prices, links
def get_prices_olx(soup):
da = soup.findAll('table','fixed offers breakword redesigned')
prices, links = [], []
for p in da[0].findAll('table'):
for price in p.findAll('p','price'):
prices.append(price.get_text().strip()[:5].replace(" ",""))
break
for link in p.findAll('a','linkWithHash'):
links.append(link['href'])
break
return prices, links
def image_loader(loader, image_name):
image = Image.open(image_name)
image = loader(image).float()
image = torch.tensor(image, requires_grad=True)
image = image.clone().detach().unsqueeze(0)
return image
data_transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()
])
# EMAG
def search_EMAG(URL):
url = 'https://www.emag.ro/search/'
URL = url + URL
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
soup
nr_pagini = soup.findAll("p", "product-new-price")
preturi = []
for pret in nr_pagini:
if len(pret.text) > 0:
try:
preturi.append(int(str(pret).split(">",1)[1].split("<",1)[0].replace('.','')))
except:
continue
link = soup.findAll("a", "thumbnail-wrapper js-product-url", href=True)
links = []
for l in link:
links.append(l['href'])
stocks = soup.findAll("p","product-stock-status")
stoc = []
for stock in stocks:
if(str(stock).split(">",1)[1].split("<",1)[0] == "în stoc" or str(stock).split(">",1)[1].split("<",1)[0] == ""):
stoc.append(2)
elif(str(stock).split(">",1)[1].split("<",1)[0] == "stoc epuizat"):
stoc.append(0)
else:
stoc.append(1)
descs = []
desc = soup.findAll("a", "product-title js-product-url")
for description in desc:
descs.append(description.get_text().strip())
return preturi[:5], links[:5], stoc[:5], descs[:5]
def search_STRADAIT(URL):
url = 'https://www.stradait.ro/Produse/Filtru/Cautare:'
URL = url + URL
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
prices = soup.findAll("div","product-price")
preturi = []
for pret in prices:
if len(pret.text) > 0:
try:
preturi.append(int(str(pret).split(">",1)[1].split("<",1)[0][:-4].replace('.','')[:-3]))
except:
continue
links = []
for tag in soup.findAll("div","image-product-grid img-responsive"):
links.append("https://www.stradait.ro" + tag.find("a", href=True)['href'])
stocks = soup.findAll("div","stockinfo")
stoc = []
for stock in stocks:
if("in stoc" in str(stock).split(">",1)[1].split("<",1)[0].lower()):
stoc.append(2)
elif(str(stock).split(">",1)[1].split("<",1)[0] == "Stoc epuizat"):
stoc.append(0)
else:
stoc.append(1)
descs = []
desc = soup.findAll("span",style=re.compile(r'color:black'))
for description in desc:
descs.append(description.get_text().strip())
return preturi[:3], links[:3], stoc[:3], descs[:3]
def search_CEL(URL):
url = 'https://www.cel.ro/cauta/'
URL = url + URL
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
prices = soup.findAll("b",attrs={"productprice":True})
preturi = []
for pret in prices:
if len(pret.text) > 0:
try:
preturi.append(int(str(pret).split(">",1)[1].split("<",1)[0].replace('.','')))
except:
continue
links = []
for tag in soup.findAll("div","productListing-poza"):
if(tag.find("a", href=True)['href'].find("cel.ro") >= 0):
links.append(tag.find("a", href=True)['href'])
else:
links.append("https://www.cel.ro" + tag.find("a", href=True)['href'])
text = str(soup)[str(soup).find("try {$('div["):].split("\n")
contor = 0
stoc = []
for line in text:
if(contor == 3):
break
try:
in_stoc = re.search(r".*class='info_stoc .*>(.*)<.*",line).group(1)
if(in_stoc == "In stoc"):
stoc.append(2)
else:
stoc.append(1)
contor += 1
except :
pass
descs = []
desc = soup.findAll("a","productListing-data-b product_link product_name")
for description in desc:
descs.append(description.get_text().strip())
return preturi[:3], links[:3], stoc[:3], descs[:3]
def search_MEDIAGALAXY(URL):
url = 'https://cerberus.mediagalaxy.ro/catalog/search/'
URL = url + URL
page = requests.get(URL,timeout=100)
soup = BeautifulSoup(page.content, 'html.parser')
result = json.loads(str(soup))
preturi = []
links = []
stoc = []
descriptions = []
for i in range(3):
preturi.append(result['products'][i]['price'])
links.append("https://mediagalaxy.ro/" + str(result['products'][i]['url_key']) + "/cpd/" + str(result['products'][i]['sku']))
if(result['products'][i]['stock_status'] == 2):
stoc.append(1)
elif(result['products'][i]['stock_status'] == 1):
stoc.append(2)
elif(result['products'][i]['stock_status'] == 0):
stoc.append(0)
descriptions.append(result['products'][i]['name'])
return preturi, links, stoc,descriptions
def search_ALTEX(URL):
url = 'https://fenrir.altex.ro/catalog/search/'
URL = url + URL
page = requests.get(URL,timeout=100)
soup = BeautifulSoup(page.content, 'html.parser')
result = json.loads(str(soup))
preturi, links, stoc, descriptions = [] , [], [], []
for i in range(3):
preturi.append(result['products'][i]['price'])
links.append("https://altex.ro/" + str(result['products'][i]['url_key']))
if(result['products'][i]['stock_status'] == 2):
stoc.append(1)
elif(result['products'][i]['stock_status'] == 1):
stoc.append(2)
elif(result['products'][i]['stock_status'] == 0):
stoc.append(0)
descriptions.append(result['products'][i]['name'])
return preturi, links, stoc, descriptions
def search_VEXIO(URL):
url = 'https://sb.searchnode.net/v1/query/docs?query_key=a3nmqeRLHM2AU656Z8CnKv0xCGnitxan&search_query='
URL = url + URL
page = requests.get(URL,timeout=100)
soup = BeautifulSoup(page.content, 'html.parser')
result = json.loads(str(soup))
preturi = []
links = []
stoc = []
descriptions = []
for i in range(3) if len(result['docs']) >= 3 else range(len(result['docs'])):
preturi.append(result['docs'][i]['f_price'])
links.append("https://www.vexio.ro" + str(result['docs'][i]['url']))
if(result['docs'][i]['s_stock_text'] == "contactati-ne pentru info stoc"):
stoc.append(1)
elif(result['docs'][i]['s_stock_text'] == "in stoc depozit"):
stoc.append(2)
else:
stoc.append(0)
descriptions.append(result['docs'][i]['s_title'])
return preturi, links, stoc, descriptions
def search_OLX(url):
prices = []
links = []
for i in range(1,5):
        URL = 'https://www.olx.ro/oferte/q-' + url +'/?page=' + str(i) +'&search%5Bfilter_float_price%3Afrom%5D=1500&currency=RON'
soup = get_html_source(URL)
if((soup.findAll('input','br3')[0].get('value')) != url):
break
desc = soup.body.findAll(text="Asigura-te ca ai scris corect (se intampla oricui) sau incearca o cautare mai generala")
if(len(desc) > 0):
break
price, link = get_prices_olx(soup)
for pret in price:
prices.append(pret)
for lnk in link:
links.append(lnk)
return prices, links
def search_OLX_iphone11_11pro(url, which_phone):
""" phone == iphone 11 ? which_phone = 1 : which_phone = 0"""
model_nou = ModifyVGG(224,2)
model_nou.classifier[6] = nn.Linear(in_features=4096, out_features=2, bias=True)
model = nn.DataParallel(model_nou)
model.load_state_dict(torch.load("trained_model"))
prices = []
links = []
current_name = 0
j=0
for i in range(1,5):
        URL = 'https://www.olx.ro/oferte/q-' + url + '/?page=' + str(i) +'&search%5Bfilter_float_price%3Afrom%5D=1500&currency=RON'
soup = get_html_source(URL)
current_name, price, link = download_images(soup,current_name)
for pret in price:
prices.append(pret)
for lnk in link:
links.append(lnk)
i=0
prices_final, links_final = [], []
for file in os.listdir(IMAGE_LOCATION):
if file.endswith('.jpg'):
try:
output = (model(image_loader(data_transforms, IMAGE_LOCATION + file))[0])
if(F.softmax(output[0],dim=0)[which_phone] > 0.8):
prices_final.append(prices[int(os.path.splitext(file)[0])-1])
links_final.append(links[int(os.path.splitext(file)[0])-1])
except:
print("sss")
i += 1
for file in os.listdir('testing'):
if file.endswith('.jpg'):
os.remove("testing/" + file)
return prices_final, links_final, prices, links
def make_prices(siteid, prodtitle, function, lastprice):
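    """Query one shop through `function`, keep offers whose title contains prodtitle,
    drop prices below 75% of lastprice (to skip accessories and partial matches), and
    return the site id, price, link, stock status and description of the cheapest
    remaining offer (zeros and the bare product title if nothing matched)."""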
pret, link, stoc, description = function(prodtitle)
prices = []
descriptions = []
for desc in description:
if (siteid == 2 or siteid == 6):
if(desc.lower().find((prodtitle + ",").lower()) != -1):
descriptions.append(desc)
else:
if(desc.lower().find((prodtitle + " ").lower()) != -1 or desc.lower().find((prodtitle + ",").lower()) != -1):
descriptions.append(desc)
for desc in descriptions:
p = pret[description.index(desc)]
if(p / lastprice > 0.75):
prices.append(p)
if len(prices):
price_update = (min(prices))
link_update = (link[pret.index(min(prices))])
stock_update = (stoc[pret.index(min(prices))])
desc_update = (description[pret.index(min(prices))])
else:
price_update = (0)
link_update = (0)
stock_update = (0)
desc_update = prodtitle
return siteid, price_update, link_update, stock_update, desc_update
def update_prices(prodtitle, lastprice):
""" Updateaza preturile de pe toate site-urile produsulului cu id-ul idprod"""
update = []
update.append([make_prices(2,prodtitle, search_ALTEX, lastprice)])
update.append([make_prices(6,prodtitle, search_MEDIAGALAXY, lastprice)])
update.append([make_prices(3,prodtitle, search_VEXIO, lastprice)])
update.append([make_prices(5,prodtitle, search_STRADAIT, lastprice)])
update.append([make_prices(4,prodtitle, search_CEL, lastprice)])
update.append([make_prices(1,prodtitle, search_EMAG, lastprice)])
return update
def update_prices_db(idprod, prodtitle, lastprice):
""" Updateaza preturile din baza de date ale produslui idprod"""
updates = update_prices(prodtitle, lastprice)
if (idprod == 1):
        updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11 pro',0)[0]]), 'https://www.olx.ro/oferte/q-iphone-11-pro/?search%5Bfilter_float_price%3Afrom%5D=1500&currency=RON%27',2])
elif (idprod == 18):
        updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11',1)[0]]), 'https://www.olx.ro/oferte/q-iphone-11/?search%5Bfilter_float_price%3Afrom%5D=1500&currency=RON%27',2])
else:
        updates.append([7,np.mean([int(x) for x in search_OLX(prodtitle)[0]]), 'https://www.olx.ro/oferte/q-' + prodtitle +'/?search%5Bfilter_float_price%3Afrom%5D=1500&currency=RON%27',2])
min_price = 99999
for update in updates:
if(update[0] != 7):
if(min_price > round(update[0][1], 2) and round(update[0][1], 2) != 0):
min_price = round(update[0][1], 2)
sql = "UPDATE `new_schema`.`products_price` SET `availability` = '"+ str(update[0][3]) +"', `price` = '" + str((round(update[0][1], 2))) +"', `prodtitle` = '" + str(update[0][4]) +"', `link` = '" + str(update[0][2]) +"' WHERE (`idprod` = '" + str(idprod) +"' and `site` = '" + str(update[0][0]) +"');"
else:
olx_price = round(update[1], 2)
sql = "UPDATE `new_schema`.`products_price` SET `availability` = '"+ str(update[3]) +"', `price` = '" + str((round(update[1], 2))) +"', `link` = '" + str(update[2]) +"' WHERE (`idprod` = '" + str(idprod) +"' and `site` = '" + str(update[0]) +"');"
update_sql(sql)
    # Assumed intent: fall back to the OLX average when no shop returned a plausible price
    if(not (min_price > 1000 and min_price < 10000)):
min_price = olx_price
update_sql("UPDATE `new_schema`.`products` SET `price` = '" + str(min_price) + "' WHERE (`idproducts` = '" + str(idprod) +"');")
return
def update_prices_db_first_time(idprod, prodtitle):
""" Updateaza preturile din baza de date ale produslui idprod"""
updates = update_prices(prodtitle, 1500)
if (idprod == 1):
updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11 pro',0)[0]]), 'https://www.olx.ro/oferte/q-iphone-11-pro/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
elif (idprod == 18):
updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11',1)[0]]), 'https://www.olx.ro/oferte/q-iphone-11/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
else:
updates.append([7,np.mean([int(x) for x in search_OLX(prodtitle)[0]]), 'https://www.olx.ro/oferte/q-' + prodtitle +'/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
for update in updates:
if(update[0] != 7):
sql = "INSERT INTO `new_schema`.`products_price` (`idprod`, `site`, `prodtitle`, `availability`, `price`, `link`) VALUES ('"+ str(idprod) +"', '"+ str(update[0][0]) +"', '"+ str(update[0][4]) +"', '"+ str(update[0][3]) +"', '" + str(round(update[0][1], 2)) +"', '" + str(update[0][2]) +"');"
else:
sql = "INSERT INTO `new_schema`.`products_price` (`idprod`, `site`, `prodtitle`, `availability`, `price`, `link`) VALUES ('"+ str(idprod) +"', '"+ str(update[0]) +"', '"+ str(prodtitle) +"', '"+ str(update[3]) +"', '" + str(round(update[1], 2)) +"', '" + str(update[2]) +"');"
update_sql(sql)
return
def update_sql(sql):
db = MySQLdb.connect("DB_IP","DB_username","DB_pass","DB_schema" )
cursor = db.cursor()
try:
cursor.execute(sql)
db.commit()
    except MySQLdb.Error as e:
        print("Could not update the database!")
        print(e)
db.rollback()
db.close()
return
def select_sql(sql):
db = MySQLdb.connect("DB_ip","DB_username","DB_passDB_pass","DB_schema" )
cursor = db.cursor()
cursor.execute(sql)
results = cursor.fetchall()
db.close()
return results
def get_products_to_update_first_time():
products_to_update = select_sql("SELECT * FROM new_schema.new_products;")
last_id = int(select_sql("SELECT idproducts FROM new_schema.products ORDER BY idproducts DESC LIMIT 1;")[0][0])
for product in products_to_update:
try:
last_id += 1
update_prices_db_first_time(last_id, product[1])
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(last_id) + "';")[0][0]
print("'"+ str(last_id) +"', '"+ str(min_price)+"', '"+ str(product[1])+"', '"+ str(product[2])+"'")
update_sql("INSERT INTO `new_schema`.`products` (`idproducts`, `prodtitle`, `price`, `prodimg`, `maker`) VALUES ('"+ str(last_id) +"', '"+str(product[1]) +"', '"+ str(min_price)+"', '"+ str(product[2])+"', '"+ str(product[3])+"');")
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(last_id) + "' and site != 7;")[0][0]
avg_price = select_sql("SELECT AVG(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(last_id) + "' and site != 7;")[0][0]
            if min_price is None:
                min_price = 0  # no valid shop price found yet
            if avg_price is None:
                avg_price = 0  # no valid shop price found yet
update_sql("INSERT INTO `new_schema`.`products_price_history` (`idprod`, `date`, `min_price`, `avg_price`) VALUES ('"+ str(last_id) +"', "+ "curdate()" +", '"+ str(min_price) +"', '"+ str(avg_price) +"');")
except:
print("I'm going forward!")
update_sql("DELETE FROM new_schema.new_products")
def get_products_to_update():
products_to_update = select_sql("SELECT * FROM new_schema.products;")
for product in products_to_update:
print(product)
try:
if( product[2] == None):
last_price = 1
else:
last_price = float(product[2])
update_prices_db(product[0], product[1],last_price)
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site != 7;")[0][0]
avg_price = select_sql("SELECT AVG(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site != 7;")[0][0]
print("'"+ str(product[0]) +"', '"+ str(min_price)+"', '"+ str(product[1])+"', '"+ str(last_price)+"'")
if(min_price == None):
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site = 7;")[0][0]
if(avg_price == None):
avg_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site = 7;")[0][0]
update_sql("INSERT INTO `new_schema`.`products_price_history` (`idprod`, `date`, `min_price`, `avg_price`) VALUES ('"+ str(product[0]) +"', "+ "curdate()" +", '"+ str(min_price) +"', '"+ str(avg_price) +"');")
except:
print("I'm going forward!")
print("--------------------------------\n")
get_products_to_update_first_time()
get_products_to_update()
```
|
github_jupyter
|
import requests
from bs4 import BeautifulSoup
import time
import MySQLdb
import math
import json
import random
import re
# Model libraries
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import torchvision.utils as utils
import cv2
import time
import os
import copy
import warnings
import pdb;
import torch.nn.functional as F
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
import matplotlib.image as mpimg
from PIL import *
class Attention(nn.Module):
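    # Spatial attention block: three stacked convolutions reduce the feature map to a
    # single-channel score, a sigmoid turns it into a mask `a`, and the input is gated
    # elementwise by that mask before being passed on.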
def __init__(self, in_features):
super(Attention, self).__init__()
self.op = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=64, kernel_size=3, padding=1),
nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3, padding=1),
nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1, padding=0))
def forward(self, l):
N, C, W, H = l.size()
c = self.op(l)
a = torch.sigmoid(c)
g = torch.mul(a.expand_as(l), l)
return a.view(N,1,W,H), g
class ModifyVGG(nn.Module):
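    # VGG-16 backbone with the three attention blocks above spliced in at three depths
    # of the pretrained feature extractor; forward() returns the class scores together
    # with the three attention maps (c1, c2, c3).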
def __init__(self, im_size, num_classes):
super(ModifyVGG,self).__init__()
vgg_model = models.vgg16(pretrained=True)
# everything before layer 4
self.before_att = nn.Sequential(*list(vgg_model.features.children())[:4])
# added layer
self.attention1 = Attention(in_features=64)
self.attention2 = Attention(in_features=128)
self.attention3 = Attention(in_features=256)
# everything after layer 4
self.after_att = nn.Sequential(*list(vgg_model.features.children())[4:])
self.avg_pool = vgg_model.avgpool
self.classifier = nn.Sequential(*list(vgg_model.classifier.children()))
def forward(self,x):
# run vgg before layer 4
x = self.before_att(x)
# run added layer
c1, x = self.attention1(x)
# run everything after layer 4 (as in the original vgg model)
x = self.after_att[:5](x)
c2, x = self.attention2(x)
x = self.after_att[5:12](x)
c3, x = self.attention3(x)
x = self.after_att[12:](x)
x = self.avg_pool(x)
x = x.view((x.shape[0],x.shape[1]*x.shape[2]*x.shape[3]))
x = self.classifier(x)
return x, c1, c2, c3
IMAGE_LOCATION = 'testing/'
def get_html_source(URL):
"""Docstring here."""
html_source = requests.get(URL).text
return BeautifulSoup(html_source, 'html.parser')
def image_name(last_name):
"""Docstring here."""
return '{}.jpg'.format(os.path.join(IMAGE_LOCATION, str(last_name+1)))
def validate(image_url):
"""Docstring here."""
return image_url.endswith('.jpg') and image_url.startswith('http')
def download_images(soup, current_name):
"""Docstring here."""
da = soup.findAll('table','fixed offers breakword redesigned')
prices, links = [], []
for p in da[0].findAll('table'):
for tabel in p.findAll('td','photo-cell'):
for t in tabel.findChildren("img"):
image_url = t.get('src')
image = requests.get(image_url)
if image.status_code == 200:
with open(image_name(current_name), 'wb') as f:
f.write(image.content)
for price in p.findAll('p','price'):
prices.append(price.get_text().strip()[:5].replace(" ",""))
for link in p.findAll('a','linkWithHash'):
links.append(link['href'])
break
current_name += 1
return current_name, prices, links
def get_prices_olx(soup):
da = soup.findAll('table','fixed offers breakword redesigned')
prices, links = [], []
for p in da[0].findAll('table'):
for price in p.findAll('p','price'):
prices.append(price.get_text().strip()[:5].replace(" ",""))
break
for link in p.findAll('a','linkWithHash'):
links.append(link['href'])
break
return prices, links
def image_loader(loader, image_name):
image = Image.open(image_name)
image = loader(image).float()
image = torch.tensor(image, requires_grad=True)
image = image.clone().detach().unsqueeze(0)
return image
data_transforms = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()
])
# EMAG
def search_EMAG(URL):
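    # Scrape the eMAG search results page: collect prices, product links, stock flags
    # (2 = in stock or no label, 1 = other availability, 0 = out of stock) and product
    # descriptions, returning the first 5 of each.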
url = 'https://www.emag.ro/search/'
URL = url + URL
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
soup
nr_pagini = soup.findAll("p", "product-new-price")
preturi = []
for pret in nr_pagini:
if len(pret.text) > 0:
try:
preturi.append(int(str(pret).split(">",1)[1].split("<",1)[0].replace('.','')))
except:
continue
link = soup.findAll("a", "thumbnail-wrapper js-product-url", href=True)
links = []
for l in link:
links.append(l['href'])
stocks = soup.findAll("p","product-stock-status")
stoc = []
for stock in stocks:
if(str(stock).split(">",1)[1].split("<",1)[0] == "în stoc" or str(stock).split(">",1)[1].split("<",1)[0] == ""):
stoc.append(2)
elif(str(stock).split(">",1)[1].split("<",1)[0] == "stoc epuizat"):
stoc.append(0)
else:
stoc.append(1)
descs = []
desc = soup.findAll("a", "product-title js-product-url")
for description in desc:
descs.append(description.get_text().strip())
return preturi[:5], links[:5], stoc[:5], descs[:5]
def search_STRADAIT(URL):
url = 'https://www.stradait.ro/Produse/Filtru/Cautare:'
URL = url + URL
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
prices = soup.findAll("div","product-price")
preturi = []
for pret in prices:
if len(pret.text) > 0:
try:
preturi.append(int(str(pret).split(">",1)[1].split("<",1)[0][:-4].replace('.','')[:-3]))
except:
continue
links = []
for tag in soup.findAll("div","image-product-grid img-responsive"):
links.append("https://www.stradait.ro" + tag.find("a", href=True)['href'])
stocks = soup.findAll("div","stockinfo")
stoc = []
for stock in stocks:
if("in stoc" in str(stock).split(">",1)[1].split("<",1)[0].lower()):
stoc.append(2)
elif(str(stock).split(">",1)[1].split("<",1)[0] == "Stoc epuizat"):
stoc.append(0)
else:
stoc.append(1)
descs = []
desc = soup.findAll("span",style=re.compile(r'color:black'))
for description in desc:
descs.append(description.get_text().strip())
return preturi[:3], links[:3], stoc[:3], descs[:3]
def search_CEL(URL):
url = 'https://www.cel.ro/cauta/'
URL = url + URL
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
prices = soup.findAll("b",attrs={"productprice":True})
preturi = []
for pret in prices:
if len(pret.text) > 0:
try:
preturi.append(int(str(pret).split(">",1)[1].split("<",1)[0].replace('.','')))
except:
continue
links = []
for tag in soup.findAll("div","productListing-poza"):
if(tag.find("a", href=True)['href'].find("cel.ro") >= 0):
links.append(tag.find("a", href=True)['href'])
else:
links.append("https://www.cel.ro" + tag.find("a", href=True)['href'])
text = str(soup)[str(soup).find("try {$('div["):].split("\n")
contor = 0
stoc = []
for line in text:
if(contor == 3):
break
try:
in_stoc = re.search(r".*class='info_stoc .*>(.*)<.*",line).group(1)
if(in_stoc == "In stoc"):
stoc.append(2)
else:
stoc.append(1)
contor += 1
except :
pass
descs = []
desc = soup.findAll("a","productListing-data-b product_link product_name")
for description in desc:
descs.append(description.get_text().strip())
return preturi[:3], links[:3], stoc[:3], descs[:3]
def search_MEDIAGALAXY(URL):
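    # Query the Media Galaxy JSON catalog endpoint, take the first 3 results and convert
    # their stock_status codes to the local availability codes used elsewhere
    # (1 -> 2, 2 -> 1, 0 -> 0).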
url = 'https://cerberus.mediagalaxy.ro/catalog/search/'
URL = url + URL
page = requests.get(URL,timeout=100)
soup = BeautifulSoup(page.content, 'html.parser')
result = json.loads(str(soup))
preturi = []
links = []
stoc = []
descriptions = []
for i in range(3):
preturi.append(result['products'][i]['price'])
links.append("https://mediagalaxy.ro/" + str(result['products'][i]['url_key']) + "/cpd/" + str(result['products'][i]['sku']))
if(result['products'][i]['stock_status'] == 2):
stoc.append(1)
elif(result['products'][i]['stock_status'] == 1):
stoc.append(2)
elif(result['products'][i]['stock_status'] == 0):
stoc.append(0)
descriptions.append(result['products'][i]['name'])
return preturi, links, stoc,descriptions
def search_ALTEX(URL):
url = 'https://fenrir.altex.ro/catalog/search/'
URL = url + URL
page = requests.get(URL,timeout=100)
soup = BeautifulSoup(page.content, 'html.parser')
result = json.loads(str(soup))
preturi, links, stoc, descriptions = [] , [], [], []
for i in range(3):
preturi.append(result['products'][i]['price'])
links.append("https://altex.ro/" + str(result['products'][i]['url_key']))
if(result['products'][i]['stock_status'] == 2):
stoc.append(1)
elif(result['products'][i]['stock_status'] == 1):
stoc.append(2)
elif(result['products'][i]['stock_status'] == 0):
stoc.append(0)
descriptions.append(result['products'][i]['name'])
return preturi, links, stoc, descriptions
def search_VEXIO(URL):
url = 'https://sb.searchnode.net/v1/query/docs?query_key=a3nmqeRLHM2AU656Z8CnKv0xCGnitxan&search_query='
URL = url + URL
page = requests.get(URL,timeout=100)
soup = BeautifulSoup(page.content, 'html.parser')
result = json.loads(str(soup))
preturi = []
links = []
stoc = []
descriptions = []
for i in range(3) if len(result['docs']) >= 3 else range(len(result['docs'])):
preturi.append(result['docs'][i]['f_price'])
links.append("https://www.vexio.ro" + str(result['docs'][i]['url']))
if(result['docs'][i]['s_stock_text'] == "contactati-ne pentru info stoc"):
stoc.append(1)
elif(result['docs'][i]['s_stock_text'] == "in stoc depozit"):
stoc.append(2)
else:
stoc.append(0)
descriptions.append(result['docs'][i]['s_title'])
return preturi, links, stoc, descriptions
def search_OLX(url):
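    # Walk up to 4 OLX result pages, stopping early when the search box no longer
    # echoes the query or the "no results" message appears, and collect the listing
    # prices and links from each page.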
prices = []
links = []
for i in range(1,5):
URL = 'https://www.olx.ro/oferte/q-' + url +'/?page=' + str(i) +'&search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON'
soup = get_html_source(URL)
if((soup.findAll('input','br3')[0].get('value')) != url):
break
desc = soup.body.findAll(text="Asigura-te ca ai scris corect (se intampla oricui) sau incearca o cautare mai generala")
if(len(desc) > 0):
break
price, link = get_prices_olx(soup)
for pret in price:
prices.append(pret)
for lnk in link:
links.append(lnk)
return prices, links
def search_OLX_iphone11_11pro(url, which_phone):
""" phone == iphone 11 ? which_phone = 1 : which_phone = 0"""
model_nou = ModifyVGG(224,2)
model_nou.classifier[6] = nn.Linear(in_features=4096, out_features=2, bias=True)
model = nn.DataParallel(model_nou)
model.load_state_dict(torch.load("trained_model"))
prices = []
links = []
current_name = 0
j=0
for i in range(1,5):
URL = 'https://www.olx.ro/oferte/q-' + url + '/?page=' + str(i) +'&search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON'
soup = get_html_source(URL)
current_name, price, link = download_images(soup,current_name)
for pret in price:
prices.append(pret)
for lnk in link:
links.append(lnk)
i=0
prices_final, links_final = [], []
for file in os.listdir(IMAGE_LOCATION):
if file.endswith('.jpg'):
try:
output = (model(image_loader(data_transforms, IMAGE_LOCATION + file))[0])
if(F.softmax(output[0],dim=0)[which_phone] > 0.8):
prices_final.append(prices[int(os.path.splitext(file)[0])-1])
links_final.append(links[int(os.path.splitext(file)[0])-1])
            except Exception:
                print("Could not classify image:", file)
i += 1
for file in os.listdir('testing'):
if file.endswith('.jpg'):
os.remove("testing/" + file)
return prices_final, links_final, prices, links
def make_prices(siteid, prodtitle, function, lastprice):
pret, link, stoc, description = function(prodtitle)
prices = []
descriptions = []
for desc in description:
if (siteid == 2 or siteid == 6):
if(desc.lower().find((prodtitle + ",").lower()) != -1):
descriptions.append(desc)
else:
if(desc.lower().find((prodtitle + " ").lower()) != -1 or desc.lower().find((prodtitle + ",").lower()) != -1):
descriptions.append(desc)
for desc in descriptions:
p = pret[description.index(desc)]
if(p / lastprice > 0.75):
prices.append(p)
if len(prices):
price_update = (min(prices))
link_update = (link[pret.index(min(prices))])
stock_update = (stoc[pret.index(min(prices))])
desc_update = (description[pret.index(min(prices))])
else:
price_update = (0)
link_update = (0)
stock_update = (0)
desc_update = prodtitle
return siteid, price_update, link_update, stock_update, desc_update
def update_prices(prodtitle, lastprice):
""" Updateaza preturile de pe toate site-urile produsulului cu id-ul idprod"""
update = []
update.append([make_prices(2,prodtitle, search_ALTEX, lastprice)])
update.append([make_prices(6,prodtitle, search_MEDIAGALAXY, lastprice)])
update.append([make_prices(3,prodtitle, search_VEXIO, lastprice)])
update.append([make_prices(5,prodtitle, search_STRADAIT, lastprice)])
update.append([make_prices(4,prodtitle, search_CEL, lastprice)])
update.append([make_prices(1,prodtitle, search_EMAG, lastprice)])
return update
def update_prices_db(idprod, prodtitle, lastprice):
""" Updateaza preturile din baza de date ale produslui idprod"""
updates = update_prices(prodtitle, lastprice)
if (idprod == 1):
updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11 pro',0)[0]]), 'https://www.olx.ro/oferte/q-iphone-11-pro/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
elif (idprod == 18):
updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11',1)[0]]), 'https://www.olx.ro/oferte/q-iphone-11/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
else:
updates.append([7,np.mean([int(x) for x in search_OLX(prodtitle)[0]]), 'https://www.olx.ro/oferte/q-' + prodtitle +'/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
min_price = 99999
for update in updates:
if(update[0] != 7):
if(min_price > round(update[0][1], 2) and round(update[0][1], 2) != 0):
min_price = round(update[0][1], 2)
sql = "UPDATE `new_schema`.`products_price` SET `availability` = '"+ str(update[0][3]) +"', `price` = '" + str((round(update[0][1], 2))) +"', `prodtitle` = '" + str(update[0][4]) +"', `link` = '" + str(update[0][2]) +"' WHERE (`idprod` = '" + str(idprod) +"' and `site` = '" + str(update[0][0]) +"');"
else:
olx_price = round(update[1], 2)
sql = "UPDATE `new_schema`.`products_price` SET `availability` = '"+ str(update[3]) +"', `price` = '" + str((round(update[1], 2))) +"', `link` = '" + str(update[2]) +"' WHERE (`idprod` = '" + str(idprod) +"' and `site` = '" + str(update[0]) +"');"
update_sql(sql)
    if not (1000 < min_price < 10000):  # no plausible shop price found; fall back to the OLX average
min_price = olx_price
update_sql("UPDATE `new_schema`.`products` SET `price` = '" + str(min_price) + "' WHERE (`idproducts` = '" + str(idprod) +"');")
return
def update_prices_db_first_time(idprod, prodtitle):
""" Updateaza preturile din baza de date ale produslui idprod"""
updates = update_prices(prodtitle, 1500)
if (idprod == 1):
updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11 pro',0)[0]]), 'https://www.olx.ro/oferte/q-iphone-11-pro/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
elif (idprod == 18):
updates.append([7,np.mean([int(x) for x in search_OLX_iphone11_11pro('iphone 11',1)[0]]), 'https://www.olx.ro/oferte/q-iphone-11/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
else:
updates.append([7,np.mean([int(x) for x in search_OLX(prodtitle)[0]]), 'https://www.olx.ro/oferte/q-' + prodtitle +'/?search%5Bfilter_float_price%3Afrom%5D=1500¤cy=RON%27',2])
for update in updates:
if(update[0] != 7):
sql = "INSERT INTO `new_schema`.`products_price` (`idprod`, `site`, `prodtitle`, `availability`, `price`, `link`) VALUES ('"+ str(idprod) +"', '"+ str(update[0][0]) +"', '"+ str(update[0][4]) +"', '"+ str(update[0][3]) +"', '" + str(round(update[0][1], 2)) +"', '" + str(update[0][2]) +"');"
else:
sql = "INSERT INTO `new_schema`.`products_price` (`idprod`, `site`, `prodtitle`, `availability`, `price`, `link`) VALUES ('"+ str(idprod) +"', '"+ str(update[0]) +"', '"+ str(prodtitle) +"', '"+ str(update[3]) +"', '" + str(round(update[1], 2)) +"', '" + str(update[2]) +"');"
update_sql(sql)
return
def update_sql(sql):
db = MySQLdb.connect("DB_IP","DB_username","DB_pass","DB_schema" )
cursor = db.cursor()
try:
cursor.execute(sql)
db.commit()
    except MySQLdb.Error as e:
        print("Could not update the database!")
        print(e)
db.rollback()
db.close()
return
def select_sql(sql):
db = MySQLdb.connect("DB_ip","DB_username","DB_passDB_pass","DB_schema" )
cursor = db.cursor()
cursor.execute(sql)
results = cursor.fetchall()
db.close()
return results
def get_products_to_update_first_time():
products_to_update = select_sql("SELECT * FROM new_schema.new_products;")
last_id = int(select_sql("SELECT idproducts FROM new_schema.products ORDER BY idproducts DESC LIMIT 1;")[0][0])
for product in products_to_update:
try:
last_id += 1
update_prices_db_first_time(last_id, product[1])
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(last_id) + "';")[0][0]
print("'"+ str(last_id) +"', '"+ str(min_price)+"', '"+ str(product[1])+"', '"+ str(product[2])+"'")
update_sql("INSERT INTO `new_schema`.`products` (`idproducts`, `prodtitle`, `price`, `prodimg`, `maker`) VALUES ('"+ str(last_id) +"', '"+str(product[1]) +"', '"+ str(min_price)+"', '"+ str(product[2])+"', '"+ str(product[3])+"');")
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(last_id) + "' and site != 7;")[0][0]
avg_price = select_sql("SELECT AVG(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(last_id) + "' and site != 7;")[0][0]
            if min_price is None:
                min_price = 0  # no valid shop price found yet
            if avg_price is None:
                avg_price = 0  # no valid shop price found yet
update_sql("INSERT INTO `new_schema`.`products_price_history` (`idprod`, `date`, `min_price`, `avg_price`) VALUES ('"+ str(last_id) +"', "+ "curdate()" +", '"+ str(min_price) +"', '"+ str(avg_price) +"');")
except:
print("I'm going forward!")
update_sql("DELETE FROM new_schema.new_products")
def get_products_to_update():
products_to_update = select_sql("SELECT * FROM new_schema.products;")
for product in products_to_update:
print(product)
try:
if( product[2] == None):
last_price = 1
else:
last_price = float(product[2])
update_prices_db(product[0], product[1],last_price)
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site != 7;")[0][0]
avg_price = select_sql("SELECT AVG(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site != 7;")[0][0]
print("'"+ str(product[0]) +"', '"+ str(min_price)+"', '"+ str(product[1])+"', '"+ str(last_price)+"'")
if(min_price == None):
min_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site = 7;")[0][0]
if(avg_price == None):
avg_price = select_sql("SELECT MIN(NULLIF(price, 0)) FROM new_schema.products_price WHERE idprod = '" + str(product[0]) + "' and site = 7;")[0][0]
update_sql("INSERT INTO `new_schema`.`products_price_history` (`idprod`, `date`, `min_price`, `avg_price`) VALUES ('"+ str(product[0]) +"', "+ "curdate()" +", '"+ str(min_price) +"', '"+ str(avg_price) +"');")
except:
print("I'm going forward!")
print("--------------------------------\n")
get_products_to_update_first_time()
get_products_to_update()
| 0.724481 | 0.273456 |
```
import gym
import cv2
import numpy as np
# DM Control Suite dm2gym wrapper examples
# https://github.com/zuoxingdong/dm2gym
env = gym.make('dm2gym:FishSwim-v0', environment_kwargs={'flat_observation': True})
#env = gym.make('Pendulum-v0')
observation_space = env.observation_space
action_space = env.action_space
print(observation_space)
print(action_space)
env = gym.make('dm2gym:HumanoidRun-v0', environment_kwargs={'flat_observation': True})
observation_space = env.observation_space
action_space = env.action_space
print(observation_space)
print(action_space)
# Rendering using dm2gym wrapper
kwargs={'use_opencv_renderer': True}
obs = env.reset()
while True:
    action = np.random.uniform(-1.0, 1.0, size=action_space.shape)
    obs, rew, done, info = env.step(action)
    if done:
        obs = env.reset()  # restart the episode once it terminates
    env.render(mode = 'human', **kwargs)
from dm_control import suite
for domain_name, task_name in suite.ALL_TASKS:
print(domain_name, task_name)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dm_control import suite
from dm_control.suite.wrappers import action_noise
from six.moves import input
from dm_control import viewer
task_kwargs = {}
task_kwargs['time_limit'] = float('inf')
env = suite.load(domain_name='humanoid', task_name='run', task_kwargs=task_kwargs)
env.task.visualize_reward = True
env = action_noise.Wrapper(env, scale=1.0)
viewer.launch(env)
# Another way to perform random actions
action_spec = env.action_spec()
# Define a uniform random policy.
def random_policy(time_step):
del time_step # Unused.
return np.random.uniform(low=action_spec.minimum,
high=action_spec.maximum,
size=action_spec.shape)
viewer.launch(env, policy=random_policy)
# Saving video example
def grabFrame(env):
# Get RGB rendering of env
rgbArr = env.physics.render(480, 640, camera_id=0)
# Convert to BGR for use with OpenCV
return cv2.cvtColor(rgbArr, cv2.COLOR_BGR2RGB)
# Load task:
env = suite.load(domain_name="humanoid", task_name="run")
# Setup video writer - mp4 at 60 fps
video_name = 'video.mp4'
frame = grabFrame(env)
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'mp4v'), 60.0, (width, height))
# First pass - Step through an episode and capture each frame
action_spec = env.action_spec()
time_step = env.reset()
while not time_step.last():
action = np.random.uniform(action_spec.minimum,
action_spec.maximum,
size=action_spec.shape)
time_step = env.step(action)
frame = grabFrame(env)
# Render env output to video
video.write(grabFrame(env))
# End render to video file
video.release()
# Second pass - Playback
cap = cv2.VideoCapture(video_name)
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:
        break  # stop when the video has been fully played back
    cv2.imshow('Playback', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
# Exit
cv2.destroyAllWindows()
from dm_control import composer
from dm_control.locomotion.examples import basic_cmu_2019, basic_rodent_2020
import numpy as np
# Build an example environment.
#env = basic_cmu_2019.cmu_humanoid_run_walls()
env = basic_rodent_2020.rodent_maze_forage()
# Get the `action_spec` describing the control inputs.
action_spec = env.action_spec()
# Define a uniform random policy.
def random_policy(time_step):
del time_step # Unused.
return np.random.uniform(low=action_spec.minimum,
high=action_spec.maximum,
size=action_spec.shape)
# Step through the environment for one episode with random actions.
#time_step = env.reset()
'''while not time_step.last():
action = np.random.uniform(action_spec.minimum, action_spec.maximum,
size=action_spec.shape)
time_step = env.step(action)
print("reward = {}, discount = {}, observations = {}.".format(
time_step.reward, time_step.discount, time_step.observation))'''
viewer.launch(env, policy=random_policy)
```
|
github_jupyter
|
import gym
import cv2
import numpy as np
# DM Control Suite dm2gym wrapper examples
# https://github.com/zuoxingdong/dm2gym
env = gym.make('dm2gym:FishSwim-v0', environment_kwargs={'flat_observation': True})
#env = gym.make('Pendulum-v0')
observation_space = env.observation_space
action_space = env.action_space
print(observation_space)
print(action_space)
env = gym.make('dm2gym:HumanoidRun-v0', environment_kwargs={'flat_observation': True})
observation_space = env.observation_space
action_space = env.action_space
print(observation_space)
print(action_space)
# Rendering using dm2gym wrapper
kwargs={'use_opencv_renderer': True}
obs = env.reset()
while True:
    action = np.random.uniform(-1.0, 1.0, size=action_space.shape)
    obs, rew, done, info = env.step(action)
    if done:
        obs = env.reset()  # restart the episode once it terminates
    env.render(mode = 'human', **kwargs)
from dm_control import suite
for domain_name, task_name in suite.ALL_TASKS:
print(domain_name, task_name)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dm_control import suite
from dm_control.suite.wrappers import action_noise
from six.moves import input
from dm_control import viewer
task_kwargs = {}
task_kwargs['time_limit'] = float('inf')
env = suite.load(domain_name='humanoid', task_name='run', task_kwargs=task_kwargs)
env.task.visualize_reward = True
env = action_noise.Wrapper(env, scale=1.0)
viewer.launch(env)
# Another way to perform random actions
action_spec = env.action_spec()
# Define a uniform random policy.
def random_policy(time_step):
del time_step # Unused.
return np.random.uniform(low=action_spec.minimum,
high=action_spec.maximum,
size=action_spec.shape)
viewer.launch(env, policy=random_policy)
# Saving video example
def grabFrame(env):
# Get RGB rendering of env
rgbArr = env.physics.render(480, 640, camera_id=0)
# Convert to BGR for use with OpenCV
return cv2.cvtColor(rgbArr, cv2.COLOR_BGR2RGB)
# Load task:
env = suite.load(domain_name="humanoid", task_name="run")
# Setup video writer - mp4 at 60 fps
video_name = 'video.mp4'
frame = grabFrame(env)
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'mp4v'), 60.0, (width, height))
# First pass - Step through an episode and capture each frame
action_spec = env.action_spec()
time_step = env.reset()
while not time_step.last():
action = np.random.uniform(action_spec.minimum,
action_spec.maximum,
size=action_spec.shape)
time_step = env.step(action)
frame = grabFrame(env)
# Render env output to video
video.write(grabFrame(env))
# End render to video file
video.release()
# Second pass - Playback
cap = cv2.VideoCapture(video_name)
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:
        break  # stop when the video has been fully played back
    cv2.imshow('Playback', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
# Exit
cv2.destroyAllWindows()
from dm_control import composer
from dm_control.locomotion.examples import basic_cmu_2019, basic_rodent_2020
import numpy as np
# Build an example environment.
#env = basic_cmu_2019.cmu_humanoid_run_walls()
env = basic_rodent_2020.rodent_maze_forage()
# Get the `action_spec` describing the control inputs.
action_spec = env.action_spec()
# Define a uniform random policy.
def random_policy(time_step):
del time_step # Unused.
return np.random.uniform(low=action_spec.minimum,
high=action_spec.maximum,
size=action_spec.shape)
# Step through the environment for one episode with random actions.
#time_step = env.reset()
'''while not time_step.last():
action = np.random.uniform(action_spec.minimum, action_spec.maximum,
size=action_spec.shape)
time_step = env.step(action)
print("reward = {}, discount = {}, observations = {}.".format(
time_step.reward, time_step.discount, time_step.observation))'''
viewer.launch(env, policy=random_policy)
| 0.731538 | 0.382055 |
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.

# Automated Machine Learning
_**Named Entity Recognition Using AutoML NLP**_
## Contents
1. [Introduction](#Introduction)
1. [Setup](#Setup)
1. [Data](#Data)
1. [Train](#Train)
1. [Inference](#Inference)
## Introduction
This notebook demonstrates Named Entity Recognition (NER) with text data using AutoML NLP.
AutoML highlights here include using end-to-end deep learning for NLP tasks such as NER.
Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.
Notebook synopsis:
1. Creating an Experiment in an existing Workspace
2. Configuration and remote run of AutoML for CoNLL 2003 dataset for NER task
3. Evaluating the trained model on a test set
## Setup
```
import logging
import os
import tempfile
import pandas as pd
import azureml.core
from azureml.core import Dataset
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.core.dataset import Dataset
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.core.compute_target import ComputeTargetException
from azureml.data.datapath import DataPath
from azureml.core.run import Run
from azureml.core.script_run_config import ScriptRunConfig
from azureml.train.automl import AutoMLConfig
```
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
```
print("This notebook was created using version 1.39.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
```
As part of the setup you have already created a <b>Workspace</b>. To run AutoML, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve, while a Run corresponds to a specific approach to the problem.
```
ws = Workspace.from_config()
# Choose an experiment name.
experiment_name = "automl-nlp-text-ner"
experiment = Experiment(ws, experiment_name)
output = {}
output["Subscription ID"] = ws.subscription_id
output["Workspace Name"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Experiment Name"] = experiment.name
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
```
## Set up a compute cluster
This section uses a user-provided compute cluster (named "gpu-compute" in this example). If a cluster with this name does not exist in your workspace, the code below will create a new one. You can adjust the cluster parameters as noted in the comments.
```
num_nodes = 1
# Choose a name for your cluster.
amlcompute_cluster_name = "gpu-compute"
# Verify that cluster does not exist already
try:
compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
print("Found existing cluster, use it.")
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_NC6", max_nodes=num_nodes # Use GPU only
)
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
```
# Data
```
# Upload dataset to datastore
data_dir = "data" # Local directory to store data
blobstore_datadir = data_dir # Blob store directory to store data in
datastore = ws.get_default_datastore()
target = DataPath(datastore=datastore, path_on_datastore=blobstore_datadir)
Dataset.File.upload_directory(
src_dir=data_dir, target=target, overwrite=True, show_progress=True
)
datastore_path = [(datastore, blobstore_datadir + "/train.txt")]
train_data = Dataset.File.from_files(path=datastore_path)
datastore_path = [(datastore, blobstore_datadir + "/dev.txt")]
val_data = Dataset.File.from_files(path=datastore_path)
train_data = train_data.register(
workspace=ws,
name="CoNLL_2003_train",
description="NER train data",
create_new_version=True,
)
val_data = val_data.register(
workspace=ws,
name="CoNLL_2003_val",
description="NER val data",
create_new_version=True,
)
```
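For reference, the `train.txt` and `dev.txt` files uploaded above are expected to be in CoNLL-style format: one token per line followed by its entity tag, with a blank line separating sentences. The fragment below is only an illustration of the shape of such a file (the tokens and tags here are made up, and the exact column layout depends on how your CoNLL files were prepared):
```
EU B-ORG
rejects O
German B-MISC
call O

Peter B-PER
Blackburn I-PER
```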
# Train
## Submit AutoML run
Here we do not set the `primary_metric` parameter, since we only train one model and do not need to rank trained models. The run will use the default primary metric, `accuracy`, but it is only used for reporting purposes.
```
automl_settings = {
"verbosity": logging.INFO,
}
automl_config = AutoMLConfig(
task="text-ner",
debug_log="automl_errors.log",
compute_target=compute_target,
training_data=train_data,
validation_data=val_data,
**automl_settings
)
```
#### Submit AutoML Run
```
automl_run = experiment.submit(automl_config, show_output=False)
_ = automl_run.wait_for_completion(show_output=False)
```
## Download Metrics
These metrics, logged with the training run, are computed with the trained model on the validation dataset.
```
validation_metrics = automl_run.get_metrics()
pd.DataFrame(
{"metric_name": validation_metrics.keys(), "value": validation_metrics.values()}
)
```
You can also get the best run id and the best model with `get_output` method.
```
best_run, best_model = automl_run.get_output()
best_run
```
# Inference
Now you can use the trained model to run inference on unseen data. We use a `ScriptRun` for this, with a script that we provide. The following blocks register the test dataset, download the inference script, and trigger the inference run. Unlike the multiclass and multilabel scenarios, the inference run for NER logs the evaluation metrics itself, so we do not have to download the predictions; we can read the metrics directly.
## Submit Inference Run
```
datastore_path = [(datastore, blobstore_datadir + "/test.txt")]
test_data = Dataset.File.from_files(path=datastore_path)
test_data = test_data.register(
workspace=ws, name="CoNLL_2003_test", description="NER test data"
)
# Load training script run corresponding to AutoML run above.
training_run_id = best_run.id
training_run = Run(experiment, training_run_id)
# Inference script run arguments
arguments = [
"--run_id",
training_run_id,
"--experiment_name",
experiment.name,
"--input_dataset_id",
test_data.as_named_input("test_data"),
]
scoring_args = arguments
with tempfile.TemporaryDirectory() as tmpdir:
# Download required files from training run into temp folder.
entry_script_name = "score_script.py"
output_path = os.path.join(tmpdir, entry_script_name)
training_run.download_file(
"outputs/" + entry_script_name, os.path.join(tmpdir, entry_script_name)
)
script_run_config = ScriptRunConfig(
source_directory=tmpdir,
script=entry_script_name,
compute_target=compute_target,
environment=training_run.get_environment(),
arguments=scoring_args,
)
scoring_run = experiment.submit(script_run_config)
scoring_run
_ = scoring_run.wait_for_completion(show_output=False)
```
## Get Evaluation Metrics
```
test_metrics = scoring_run.get_metrics()
test_metrics
pd.DataFrame(
{"metric name": list(test_metrics.keys()), "value": list(test_metrics.values())}
)
```
|
github_jupyter
|
import logging
import os
import tempfile
import pandas as pd
import azureml.core
from azureml.core import Dataset
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.core.dataset import Dataset
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.core.compute_target import ComputeTargetException
from azureml.data.datapath import DataPath
from azureml.core.run import Run
from azureml.core.script_run_config import ScriptRunConfig
from azureml.train.automl import AutoMLConfig
print("This notebook was created using version 1.39.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
ws = Workspace.from_config()
# Choose an experiment name.
experiment_name = "automl-nlp-text-ner"
experiment = Experiment(ws, experiment_name)
output = {}
output["Subscription ID"] = ws.subscription_id
output["Workspace Name"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Experiment Name"] = experiment.name
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
num_nodes = 1
# Choose a name for your cluster.
amlcompute_cluster_name = "gpu-compute"
# Verify that cluster does not exist already
try:
compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
print("Found existing cluster, use it.")
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_NC6", max_nodes=num_nodes # Use GPU only
)
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# Upload dataset to datastore
data_dir = "data" # Local directory to store data
blobstore_datadir = data_dir # Blob store directory to store data in
datastore = ws.get_default_datastore()
target = DataPath(datastore=datastore, path_on_datastore=blobstore_datadir)
Dataset.File.upload_directory(
src_dir=data_dir, target=target, overwrite=True, show_progress=True
)
datastore_path = [(datastore, blobstore_datadir + "/train.txt")]
train_data = Dataset.File.from_files(path=datastore_path)
datastore_path = [(datastore, blobstore_datadir + "/dev.txt")]
val_data = Dataset.File.from_files(path=datastore_path)
train_data = train_data.register(
workspace=ws,
name="CoNLL_2003_train",
description="NER train data",
create_new_version=True,
)
val_data = val_data.register(
workspace=ws,
name="CoNLL_2003_val",
description="NER val data",
create_new_version=True,
)
automl_settings = {
"verbosity": logging.INFO,
}
automl_config = AutoMLConfig(
task="text-ner",
debug_log="automl_errors.log",
compute_target=compute_target,
training_data=train_data,
validation_data=val_data,
**automl_settings
)
automl_run = experiment.submit(automl_config, show_output=False)
_ = automl_run.wait_for_completion(show_output=False)
validation_metrics = automl_run.get_metrics()
pd.DataFrame(
{"metric_name": validation_metrics.keys(), "value": validation_metrics.values()}
)
best_run, best_model = automl_run.get_output()
best_run
datastore_path = [(datastore, blobstore_datadir + "/test.txt")]
test_data = Dataset.File.from_files(path=datastore_path)
test_data = test_data.register(
workspace=ws, name="CoNLL_2003_test", description="NER test data"
)
# Load training script run corresponding to AutoML run above.
training_run_id = best_run.id
training_run = Run(experiment, training_run_id)
# Inference script run arguments
arguments = [
"--run_id",
training_run_id,
"--experiment_name",
experiment.name,
"--input_dataset_id",
test_data.as_named_input("test_data"),
]
scoring_args = arguments
with tempfile.TemporaryDirectory() as tmpdir:
# Download required files from training run into temp folder.
entry_script_name = "score_script.py"
output_path = os.path.join(tmpdir, entry_script_name)
training_run.download_file(
"outputs/" + entry_script_name, os.path.join(tmpdir, entry_script_name)
)
script_run_config = ScriptRunConfig(
source_directory=tmpdir,
script=entry_script_name,
compute_target=compute_target,
environment=training_run.get_environment(),
arguments=scoring_args,
)
scoring_run = experiment.submit(script_run_config)
scoring_run
_ = scoring_run.wait_for_completion(show_output=False)
test_metrics = scoring_run.get_metrics()
test_metrics
pd.DataFrame(
{"metric name": list(test_metrics.keys()), "value": list(test_metrics.values())}
)
| 0.585457 | 0.897021 |
## Assignment
Hengchao Wang
1001778272
```
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import sklearn
import prettytable as pt
import numpy as np
import tensorflow as tf
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from sklearn.preprocessing import normalize,StandardScaler,PolynomialFeatures
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
res = {}
```
## a. Generate 20 data pairs (X, Y) using y = sin(2\*pi\*X) + N
```
x_sin = np.linspace(0,1,200)
x = np.random.uniform(0,1,20)
d = np.random.normal(loc=0,scale=0.2,size=20) # N from the normal gaussian distribution
print(d)
y_sin = 2*math.pi*x_sin
y = 2*math.pi*x
print(y)
for i in range(200):
y_sin[i] = math.sin(y_sin[i])
for i in range(20):
y[i] = math.sin(y[i])+ d[i]
plt.plot(x_sin, y_sin, "r-") # original sin function curve
plt.scatter(x, y)
data_1 = {'X':x, 'Y':y}
data = pd.DataFrame(data = data_1, dtype=np.int8)
data
X_train,X_test, Y_train, Y_test =model_selection.train_test_split(x, y, test_size=0.5, random_state=3)
train = {'X':X_train, 'Y': Y_train}
train_data = pd.DataFrame(data = train, dtype=np.int8)
train_data
test = {'X':X_test, 'Y': Y_test}
test_data = pd.DataFrame(data = test, dtype=np.int8)
test_data
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
```
## b. Using root mean square error, find weights of polynomial regression for orders 0, 1, 3, and 9
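For reference, using the standard definitions (not specific to the data generated above), an order-$M$ polynomial fit and its root mean square error are

$$\hat{y}(x, \mathbf{w}) = \sum_{j=0}^{M} w_j x^j, \qquad \mathrm{RMSE} = \sqrt{\frac{1}{N}\sum_{n=1}^{N}\bigl(y_n - \hat{y}(x_n, \mathbf{w})\bigr)^2}$$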
```
def polynomialRegression(i:int ) :
    polynomial = PolynomialFeatures(degree = i)  # polynomial features of degree i
x_transformed = polynomial.fit_transform(X_train.reshape(10,1))
poly_linear_model = LinearRegression()
poly_linear_model.fit(x_transformed, Y_train)# train
return polynomial, poly_linear_model
```
### weights of polynomial regression for order is 0
```
polynomial_0, poly_linear_model_0 = polynomialRegression(0)
coef = poly_linear_model_0.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['0'] = tmp
coef
```
### weights of polynomial regression for order is 1
```
polynomial_1, poly_linear_model_1 = polynomialRegression(1)
coef = poly_linear_model_1.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['1'] = tmp
coef
```
### weights of polynomial regression for order is 3
```
polynomial_3, poly_linear_model_3 = polynomialRegression(3)
coef = poly_linear_model_3.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['3'] = tmp
coef
```
### weights of polynomial regression for order is 9
```
polynomial_9, poly_linear_model_9 = polynomialRegression(9)
coef = poly_linear_model_9.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['9'] = tmp
coef
```
## c. Display weights in table
```
from prettytable import PrettyTable
x= PrettyTable()
x.add_column("label\order", ["W0","W1","W2","W3","W4","W5","W6","W7","W8","W9"])
x.add_column("0", res["0"])
x.add_column("1", res["1"])
x.add_column("3", res["3"])
x.add_column("9", res["9"])
print(x)
# the label 0, W0 in the table is the weights of polynomial regression for order is 0
# the label 1, W0 and W1 in the table is the weights of polynomial regression for order is 1
# the label 3, W0, W1, W2 and W3 in the table is the weights of polynomial regression for order is 3
# the label 9, W0-W9 in the table is the weights of polynomial regression for order is 9
```
## d. Draw a chart of fit data
### weights of polynomial regression for order is 0
```
xx = np.linspace(0, 1, 100)
xx_transformed_0 = polynomial_0.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_0.predict(xx_transformed_0)
plt.plot(xx, yy,label="$y = N$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
```
### weights of polynomial regression for order is 1
```
xx = np.linspace(0, 1, 100)
xx_transformed_1 = polynomial_1.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_1.predict(xx_transformed_1)
plt.plot(xx, yy,label="$y = ax$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
```
### weights of polynomial regression for order is 3
```
xx = np.linspace(0, 1, 100)
xx_transformed_3 = polynomial_3.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_3.predict(xx_transformed_3)
plt.plot(xx, yy,label="$y = ax3+bx2+cx+d$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
```
### weights of polynomial regression for order is 9
```
xx = np.linspace(0, 1, 100)
xx_transformed_9 = polynomial_9.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_9.predict(xx_transformed_9)
plt.plot(xx, yy,label="$y = ax9+....$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.ylim(-1.5 ,1.5)
plt.legend()
```
## e. Draw train error vs test error
```
train_error = [0]*10 #train error
test_error = [0]*10 #test error
def getMse(Y, yy):
standard = tf.square(Y - yy)
mse = tf.reduce_mean(standard)
return mse.numpy()
def getError(i:int, model) :
polynomial = PolynomialFeatures(degree = i)
xx_transformed_test = polynomial.fit_transform(X_test.reshape(X_test.shape[0], 1))
    xx_transformed_train = polynomial.fit_transform(X_train.reshape(X_train.shape[0], 1))
yy_test = model.predict(xx_transformed_test)
yy_train = model.predict(xx_transformed_train)
test_error[i] = getMse(Y_test, yy_test)
train_error[i] = getMse(Y_train, yy_train)
polynomial_2, poly_linear_model_2 = polynomialRegression(2)
polynomial_4, poly_linear_model_4 = polynomialRegression(4)
polynomial_5, poly_linear_model_5 = polynomialRegression(5)
polynomial_6, poly_linear_model_6 = polynomialRegression(6)
polynomial_7, poly_linear_model_7 = polynomialRegression(7)
polynomial_8, poly_linear_model_8 = polynomialRegression(8)
# 0,1,3,9 I used the model fitted before.
getError(0, poly_linear_model_0)
getError(1, poly_linear_model_1)
getError(2, poly_linear_model_2)
getError(3, poly_linear_model_3)
getError(4, poly_linear_model_4)
getError(5, poly_linear_model_5)
getError(6, poly_linear_model_6)
getError(7, poly_linear_model_7)
getError(8, poly_linear_model_8)
getError(9, poly_linear_model_9)
print(test_error)
print(train_error)
xx = np.linspace(0, 9, 10) # error chart
plt.ylim(0 ,1)
plt.xlim(0,9)
plt.plot(xx, test_error, label = "$test error$", c = 'r')
plt.plot(xx, train_error, label = "$train error$", c = 'b')
plt.xlabel('Orders')
plt.ylabel('Error')
plt.legend()
```
## f. Generate 100 more data and fit 9th order model and draw fit
```
x_100 = np.linspace(0,1,100) # Generate 100 new samples
d_100 = np.random.normal(loc=0,scale=0.2,size=100) # N from the normal gaussian distribution
y_100 = 2*math.pi*x_100
for i in range(100):
y_100[i] = math.sin(y_100[i])+ d_100[i]
data_1 = {'X':x_100, 'Y':y_100}
data_100 = pd.DataFrame(data = data_1, dtype=np.int8)
data_100
plt.scatter(x_100, y_100, marker = "o",c = "r")
polynomial = PolynomialFeatures(degree = 9)  # degree-9 polynomial features
x_transformed = polynomial.fit_transform(x_100.reshape(100,1))
poly_linear_model = LinearRegression()
poly_linear_model.fit(x_transformed, y_100)# train
xx_transformed_9 = polynomial.fit_transform(x_100.reshape(x_100.shape[0], 1))
yy = poly_linear_model.predict(xx_transformed_9)
plt.plot(x_100, yy,label="$y = ax9+.....$")
plt.scatter(x_100, y_100, c = "r")
plt.legend()
```
## g. Regularize using the sum of weights.
## h. Draw chart for lambda
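For context, the `Ridge` model used below fits the degree-9 polynomial by minimizing the squared error plus an L2 penalty on the weights, where $\lambda$ corresponds to the `alpha` argument:

$$E(\mathbf{w}) = \sum_{n=1}^{N}\bigl(y_n - \hat{y}(x_n, \mathbf{w})\bigr)^2 + \lambda \lVert \mathbf{w} \rVert_2^2$$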
```
def regularizeRidge(alpha):
    if alpha < 0: alpha = math.exp(alpha) # a negative argument is interpreted as ln(lambda) and converted back via exp
else:
print("alpha = ",alpha)
if alpha != 0: print("ln(alpha) = ", math.log(alpha))
    polynomial = PolynomialFeatures(degree = 9)  # degree-9 polynomial features
x_transformed = polynomial.fit_transform(X_train.reshape(10,1))
poly_linear_model = Ridge(alpha = alpha)
poly_linear_model.fit(x_transformed, Y_train)# train
return poly_linear_model
def chartRidge(alpha):
model = regularizeRidge(alpha)
xx = np.linspace(0, 1, 100)
x_transformed = polynomial.fit_transform(xx.reshape(100,1))
yy = model.predict(x_transformed)
plt.plot(xx, yy,label=alpha)
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
chartRidge(0)      # lambda = 0 (no regularization)
chartRidge(0.1)    # ln(lambda) ≈ -2.30
chartRidge(0.01)   # ln(lambda) ≈ -4.61
chartRidge(0.001)  # ln(lambda) ≈ -6.91
chartRidge(0.0001) # ln(lambda) ≈ -9.21
```
## i. Draw test and train error according to lamda
```
train_error_ridge = np.zeros(30)
test_error_ridge = np.zeros(30)
def getErrorRidge(i:int, model) : # A new error function
xx_transformed_test = polynomial.fit_transform(X_test.reshape(X_test.shape[0], 1))
xx_transformed_train = polynomial.fit_transform(X_train.reshape(X_train.shape[0], 1))
yy_test = model.predict(xx_transformed_test)
yy_train = model.predict(xx_transformed_train)
test_error_ridge[i] = getMse(Y_test, yy_test)
train_error_ridge[i] = getMse(Y_train, yy_train)
xx = list(range(-30, 0))
for i in xx:
model = regularizeRidge(i)
getErrorRidge(i, model)
xx = list(range(-30, 0))
plt.ylim(0 ,0.5)
plt.xlim(-30,0)
plt.plot(xx, test_error_ridge, label = "$test-error$", c = 'r')
plt.plot(xx, train_error_ridge, label = "$train-error$", c = 'b')
plt.xlabel('ln(lambda)')
plt.ylabel('Error')
plt.legend()
# get the best lambda
best_lambda = 0
for i in range(-30,0):
if test_error_ridge[i+30] == test_error_ridge.min(): best_lambda = i
print("best ln(lambda) = ", best_lambda)
best_lambda_0 = math.exp(best_lambda)
print("best lambda = ", best_lambda_0)
print("In summary, the model which ln(lamdba) = ",best_lambda,", lambda = ",best_lambda_0," has the best test performance.")
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import sklearn
import prettytable as pt
import numpy as np
import tensorflow as tf
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from sklearn.preprocessing import normalize,StandardScaler,PolynomialFeatures
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
res = {}
x_sin = np.linspace(0,1,200)
x = np.random.uniform(0,1,20)
d = np.random.normal(loc=0,scale=0.2,size=20) # N from the normal gaussian distribution
print(d)
y_sin = 2*math.pi*x_sin
y = 2*math.pi*x
print(y)
for i in range(200):
y_sin[i] = math.sin(y_sin[i])
for i in range(20):
y[i] = math.sin(y[i])+ d[i]
plt.plot(x_sin, y_sin, "r-") # original sin function curve
plt.scatter(x, y)
data_1 = {'X':x, 'Y':y}
data = pd.DataFrame(data = data_1, dtype=np.int8)
data
X_train,X_test, Y_train, Y_test =model_selection.train_test_split(x, y, test_size=0.5, random_state=3)
train = {'X':X_train, 'Y': Y_train}
train_data = pd.DataFrame(data = train, dtype=np.int8)
train_data
test = {'X':X_test, 'Y': Y_test}
test_data = pd.DataFrame(data = test, dtype=np.int8)
test_data
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
def polynomialRegression(i:int ) :
    polynomial = PolynomialFeatures(degree = i)  # polynomial features of degree i
x_transformed = polynomial.fit_transform(X_train.reshape(10,1))
poly_linear_model = LinearRegression()
poly_linear_model.fit(x_transformed, Y_train)# train
return polynomial, poly_linear_model
polynomial_0, poly_linear_model_0 = polynomialRegression(0)
coef = poly_linear_model_0.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['0'] = tmp
coef
polynomial_1, poly_linear_model_1 = polynomialRegression(1)
coef = poly_linear_model_1.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['1'] = tmp
coef
polynomial_3, poly_linear_model_3 = polynomialRegression(3)
coef = poly_linear_model_3.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['3'] = tmp
coef
polynomial_9, poly_linear_model_9 = polynomialRegression(9)
coef = poly_linear_model_9.coef_
tmp = [0]*10
for i in range(len(coef)) :
tmp[i] = int(coef[i])
res['9'] = tmp
coef
from prettytable import PrettyTable
x= PrettyTable()
x.add_column("label\order", ["W0","W1","W2","W3","W4","W5","W6","W7","W8","W9"])
x.add_column("0", res["0"])
x.add_column("1", res["1"])
x.add_column("3", res["3"])
x.add_column("9", res["9"])
print(x)
# the label 0, W0 in the table is the weights of polynomial regression for order is 0
# the label 1, W0 and W1 in the table is the weights of polynomial regression for order is 1
# the label 3, W0, W1, W2 and W3 in the table is the weights of polynomial regression for order is 3
# the label 9, W0-W9 in the table is the weights of polynomial regression for order is 9
xx = np.linspace(0, 1, 100)
xx_transformed_0 = polynomial_0.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_0.predict(xx_transformed_0)
plt.plot(xx, yy,label="$y = N$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
xx = np.linspace(0, 1, 100)
xx_transformed_1 = polynomial_1.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_1.predict(xx_transformed_1)
plt.plot(xx, yy,label="$y = ax$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
xx = np.linspace(0, 1, 100)
xx_transformed_3 = polynomial_3.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_3.predict(xx_transformed_3)
plt.plot(xx, yy,label="$y = ax3+bx2+cx+d$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
xx = np.linspace(0, 1, 100)
xx_transformed_9 = polynomial_9.fit_transform(xx.reshape(xx.shape[0], 1))
yy = poly_linear_model_9.predict(xx_transformed_9)
plt.plot(xx, yy,label="$y = ax9+....$")
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.ylim(-1.5 ,1.5)
plt.legend()
train_error = [0]*10 #train error
test_error = [0]*10 #test error
def getMse(Y, yy):
standard = tf.square(Y - yy)
mse = tf.reduce_mean(standard)
return mse.numpy()
def getError(i:int, model) :
polynomial = PolynomialFeatures(degree = i)
xx_transformed_test = polynomial.fit_transform(X_test.reshape(X_test.shape[0], 1))
    xx_transformed_train = polynomial.fit_transform(X_train.reshape(X_train.shape[0], 1))
yy_test = model.predict(xx_transformed_test)
yy_train = model.predict(xx_transformed_train)
test_error[i] = getMse(Y_test, yy_test)
train_error[i] = getMse(Y_train, yy_train)
polynomial_2, poly_linear_model_2 = polynomialRegression(2)
polynomial_4, poly_linear_model_4 = polynomialRegression(4)
polynomial_5, poly_linear_model_5 = polynomialRegression(5)
polynomial_6, poly_linear_model_6 = polynomialRegression(6)
polynomial_7, poly_linear_model_7 = polynomialRegression(7)
polynomial_8, poly_linear_model_8 = polynomialRegression(8)
# 0,1,3,9 I used the model fitted before.
getError(0, poly_linear_model_0)
getError(1, poly_linear_model_1)
getError(2, poly_linear_model_2)
getError(3, poly_linear_model_3)
getError(4, poly_linear_model_4)
getError(5, poly_linear_model_5)
getError(6, poly_linear_model_6)
getError(7, poly_linear_model_7)
getError(8, poly_linear_model_8)
getError(9, poly_linear_model_9)
print(test_error)
print(train_error)
xx = np.linspace(0, 9, 10) # error chart
plt.ylim(0 ,1)
plt.xlim(0,9)
plt.plot(xx, test_error, label = "$test error$", c = 'r')
plt.plot(xx, train_error, label = "$train error$", c = 'b')
plt.xlabel('Orders')
plt.ylabel('Error')
plt.legend()
x_100 = np.linspace(0,1,100) # Generate 100 new samples
d_100 = np.random.normal(loc=0,scale=0.2,size=100) # N from the normal gaussian distribution
y_100 = 2*math.pi*x_100
for i in range(100):
y_100[i] = math.sin(y_100[i])+ d_100[i]
data_1 = {'X':x_100, 'Y':y_100}
data_100 = pd.DataFrame(data = data_1)  # keep the float dtype so the sampled values are not truncated
data_100
plt.scatter(x_100, y_100, marker = "o",c = "r")
polynomial = PolynomialFeatures(degree = 9)  # degree-9 polynomial features
x_transformed = polynomial.fit_transform(x_100.reshape(100,1))
poly_linear_model = LinearRegression()
poly_linear_model.fit(x_transformed, y_100)# train
xx_transformed_9 = polynomial.fit_transform(x_100.reshape(x_100.shape[0], 1))
yy = poly_linear_model.predict(xx_transformed_9)
plt.plot(x_100, yy,label="$y = ax9+.....$")
plt.scatter(x_100, y_100, c = "r")
plt.legend()
def regularizeRidge(alpha):
    # A negative argument is interpreted as ln(lambda), so convert it back with exp
    if alpha < 0: alpha = math.exp(alpha)
    else:
        print("alpha = ", alpha)
        if alpha != 0: print("ln(alpha) = ", math.log(alpha))
    polynomial = PolynomialFeatures(degree = 9)  # degree-9 polynomial features
x_transformed = polynomial.fit_transform(X_train.reshape(10,1))
poly_linear_model = Ridge(alpha = alpha)
poly_linear_model.fit(x_transformed, Y_train)# train
return poly_linear_model
def chartRidge(alpha):
model = regularizeRidge(alpha)
xx = np.linspace(0, 1, 100)
x_transformed = polynomial.fit_transform(xx.reshape(100,1))
yy = model.predict(x_transformed)
plt.plot(xx, yy,label=alpha)
plt.scatter(X_train, Y_train)
plt.scatter(X_test, Y_test, c = 'r')
plt.legend()
chartRidge(0)       # lambda = 0 (no regularization)
chartRidge(0.1)     # lambda = 0.1,    ln(lambda) ≈ -2.30
chartRidge(0.01)    # lambda = 0.01,   ln(lambda) ≈ -4.61
chartRidge(0.001)   # lambda = 0.001,  ln(lambda) ≈ -6.91
chartRidge(0.0001)  # lambda = 0.0001, ln(lambda) ≈ -9.21
train_error_ridge = np.zeros(30)
test_error_ridge = np.zeros(30)
def getErrorRidge(i:int, model) : # A new error function
xx_transformed_test = polynomial.fit_transform(X_test.reshape(X_test.shape[0], 1))
xx_transformed_train = polynomial.fit_transform(X_train.reshape(X_train.shape[0], 1))
yy_test = model.predict(xx_transformed_test)
yy_train = model.predict(xx_transformed_train)
test_error_ridge[i] = getMse(Y_test, yy_test)
train_error_ridge[i] = getMse(Y_train, yy_train)
xx = list(range(-30, 0))
for i in xx:
model = regularizeRidge(i)
getErrorRidge(i, model)
xx = list(range(-30, 0))
plt.ylim(0 ,0.5)
plt.xlim(-30,0)
plt.plot(xx, test_error_ridge, label = "$test-error$", c = 'r')
plt.plot(xx, train_error_ridge, label = "$train-error$", c = 'b')
plt.xlabel('ln(lambda)')
plt.ylabel('Error')
plt.legend()
# get the best lambda
best_lambda = 0
for i in range(-30,0):
if test_error_ridge[i+30] == test_error_ridge.min(): best_lambda = i
print("best ln(lambda) = ", best_lambda)
best_lambda_0 = math.exp(best_lambda)
print("best lambda = ", best_lambda_0)
print("In summary, the model which ln(lamdba) = ",best_lambda,", lambda = ",best_lambda_0," has the best test performance.")
```
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
import zarr
import gcsfs
xr.set_options(display_style='html')
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
plt.rcParams['figure.figsize'] = 12, 6
df = pd.read_csv('https://storage.googleapis.com/cmip6/cmip6-zarr-consolidated-stores.csv')
df.head()
np.unique(df['variable_id'].values)
df.query("activity_id=='ScenarioMIP' & member_id == 'r1i1p1f1' & table_id == 'day' & variable_id == 'tas' & experiment_id == 'ssp370'" )
query_string = ("activity_id=='{activity_id}' & member_id == 'r1i1p1f1' & table_id == 'day' & variable_id == '{variable}' & experiment_id == '{ssprcp}'")
for ssp_rcp in ['historical', 'ssp126', 'ssp245', 'ssp370']:
for variable in ['tasmax', 'tasmin', 'pr']:
if ssp_rcp == 'historical':
activity_id = 'CMIP'
else:
activity_id = 'ScenarioMIP'
df_result = df.query(query_string.format(variable=variable,
ssprcp=ssp_rcp,
activity_id=activity_id))
num_models = len(df_result)
print("for %s, there are %.0f models for %s" %(ssp_rcp, num_models, variable))
df_ta = df.query("activity_id=='ScenarioMIP' & member_id == 'r1i1p1f1' & table_id == 'day' & variable_id == 'tasmax' & experiment_id == 'ssp370'" )
df_ta
df_ta = df.query("activity_id=='ScenarioMIP' & member_id == 'r1i1p1f1' & table_id == 'day' & variable_id == 'tasmin' & experiment_id == 'ssp370'" )
df_ta
df_ta = df.query("activity_id=='ScenarioMIP' & member_id == 'r1i1p1f1' & table_id == 'day' & variable_id == 'pr' & experiment_id == 'ssp370'" )
df_ta
```
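As a hedged sketch of how one of these catalogued stores could actually be opened, the cell below assumes the catalogue's `zstore` column of `gs://` paths and anonymous GCS access, as in the usual Pangeo CMIP6 setup:
```
# Open the first matching zarr store lazily with xarray
gcs = gcsfs.GCSFileSystem(token='anon')
zstore = df_ta.zstore.values[0]          # e.g. 'gs://cmip6/CMIP6/ScenarioMIP/...'
mapper = gcs.get_mapper(zstore)
ds = xr.open_zarr(mapper, consolidated=True)
ds
```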
### Look at ESGF data availability
```
#!/usr/bin/env python
from __future__ import print_function
import requests
import xml.etree.ElementTree as ET
import numpy
# Author: Unknown
# I got the original version from a word document published by ESGF
# https://docs.google.com/document/d/1pxz1Kd3JHfFp8vR2JCVBfApbsHmbUQQstifhGNdc6U0/edit?usp=sharing
# API AT: https://github.com/ESGF/esgf.github.io/wiki/ESGF_Search_REST_API#results-pagination
def esgf_search(server="https://esgf-node.llnl.gov/esg-search/search",
files_type="OPENDAP", local_node=True, project="CMIP6",
verbose=False, format="application%2Fsolr%2Bjson",
use_csrf=False, **search):
client = requests.session()
payload = search
payload["project"] = project
payload["type"]= "File"
if local_node:
payload["distrib"] = "false"
if use_csrf:
client.get(server)
if 'csrftoken' in client.cookies:
# Django 1.6 and up
csrftoken = client.cookies['csrftoken']
else:
# older versions
csrftoken = client.cookies['csrf']
payload["csrfmiddlewaretoken"] = csrftoken
payload["format"] = format
offset = 0
numFound = 10000
all_files = []
files_type = files_type.upper()
while offset < numFound:
payload["offset"] = offset
url_keys = []
for k in payload:
url_keys += ["{}={}".format(k, payload[k])]
url = "{}/?{}".format(server, "&".join(url_keys))
print(url)
r = client.get(url)
r.raise_for_status()
resp = r.json()["response"]
numFound = int(resp["numFound"])
resp = resp["docs"]
offset += len(resp)
for d in resp:
if verbose:
for k in d:
print("{}: {}".format(k,d[k]))
url = d["url"]
for f in d["url"]:
sp = f.split("|")
if sp[-1] == files_type:
all_files.append(sp[0].split(".html")[0])
return sorted(all_files)
'''df_ta = df.query("activity_id=='ScenarioMIP' & member_id == 'r1i1p1f1' & table_id == 'day' & variable_id == 'pr' & experiment_id == 'ssp370'" )
df_ta'''
result = esgf_search(server="https://esgf-node.llnl.gov/esg-search/search", activity_id='ScenarioMIP', table_id='day', variable_id='pr', experiment_id='ssp370',
member_id="r1i1p1f1")
result
```
<a href="https://colab.research.google.com/github/JImMY5232/COMP-593/blob/main/lab%201.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## **Welcome to COMP 593!**
For your first lab, we will experiment with running a script, and saving our project to our personal github repositories:
##Installing Dependencies##
Dependencies are routines, objects, and methods that a project requires. We add dependencies to our project in the form of **Libraries** when we want to unlock functionality that already exists; this could be as simple as file IO or as complex as fully fledged machine learning libraries. Libraries can be added to our project manually, by downloading them and placing them in our runtime environment, or by using a **Package Manager** such as PIP.
Run the below code to download the **pyfiglet** library, which we will use to generate some ASCII art.
```
pip install pyfiglet
```
There are *hundreds of thousands* of Python libraries at your disposal. Some may suit your needs better than others depending on the goals of your scripts or applications. The [PyPI Repository](https://pypi.org/) contains a searchable database of packages that are installable via the pip package manager.
Run the code below to get an idea of the number of packages that are included for your user within Colab. You don't need to know what all of these do, but it should indicate that python is a very powerful language.
```
pip list
```
If you would like to see if pyfiglet was installed, you could scan the list above, or you could **pipe** the output of `pip list` to a console command known as `grep` that will filter for specific strings. This is an example of **redirecting output,** which you have learned about already.
```
pip list | grep pyfiglet
```
# Writing Our Script
The intention of this Colab introduction is to get you familiar with using Google Colab to accomplish scripting goals. Today, we will be using the `pyfiglet` library we have just installed to output some text. To understand the methods available to us in `pyfiglet` we can look up [the GitHub repository](https://github.com/pwaller/pyfiglet).
***Remember: Since open source packages are at the mercy of their developers or maintainers, comprehensive documentation is never a guarantee.***
The help documentation outlines a command line `--help` argument, which means that documentation exists. We can't call command line arguments for imported libraries in colab, but we *can* accomplish the same goal in colab by using the python `help([Object])` function.
```
from pyfiglet import Figlet
help(Figlet)
```
Using this function we can see that the `Figlet` object has several methods available. This will bring us to your task:
**In the editor below, finish a script that accomplishes the following goals:**
1. Prompt the user to select from a list of 5 fonts.
2. Prompt the user to input the string they would like output in that font.
3. Render the text using the selected font.
*Hint:* Call the `getFonts()` method to get a list of the available fonts.
```
from pyfiglet import Figlet
#Instantiate a Figlet Object
f = Figlet()
#Prompt the user to select a font
selectedFont = input("Select a font: \n 1. avatar \n 2. banner3 \n 3. barbwire \n 4. big \n 5. binary \n >>> ")
#Prompt the user to type a message
my_art =f.renderText('The real magic')
print(my_art)
```
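One possible completion of the exercise is sketched below; the menu-to-font mapping and the fallback to the `standard` font are choices made for this sketch, not part of the original script:
```
from pyfiglet import Figlet
# Map menu choices to pyfiglet font names
fonts = {'1': 'avatar', '2': 'banner3', '3': 'barbwire', '4': 'big', '5': 'binary'}
#Prompt the user to select a font
choice = input("Select a font: \n 1. avatar \n 2. banner3 \n 3. barbwire \n 4. big \n 5. binary \n >>> ")
selected_font = fonts.get(choice.strip(), 'standard')  # fall back to the default font
#Prompt the user to type a message
message = input("Enter the text you would like rendered: ")
#Render the message in the selected font
f = Figlet(font=selected_font)
print(f.renderText(message))
```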
# Submitting your Script
Once you have successfully completed your script, make sure to save it to your GitHub account. (Take a look [at the official Colab instructions](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb) if you need a refresher.)
<h1>CS4618: Artificial Intelligence I</h1>
<h1>Vectors and Matrices</h1>
<h2>
Derek Bridge<br>
School of Computer Science and Information Technology<br>
University College Cork
</h2>
<h1>Initialization</h1>
$\newcommand{\Set}[1]{\{#1\}}$
$\newcommand{\Tuple}[1]{\langle#1\rangle}$
$\newcommand{\v}[1]{\pmb{#1}}$
$\newcommand{\cv}[1]{\begin{bmatrix}#1\end{bmatrix}}$
$\newcommand{\rv}[1]{[#1]}$
$\DeclareMathOperator{\argmax}{arg\,max}$
$\DeclareMathOperator{\argmin}{arg\,min}$
$\DeclareMathOperator{\dist}{dist}$
$\DeclareMathOperator{\abs}{abs}$
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import numpy.linalg as npla
from math import sqrt
```
<h1>Doing Things with Data</h1>
<ul>
<li>All of these are about doing things with data:
<ul>
<li>data science, data analytics, machine learning, statistics, statistical machine learning, statistical inference,
data mining, knowledge discovery, pattern recognition, …
</li>
</ul>
</li>
<li>These fields have been given impetus by:
<ul>
<li>availability of lots of data (sometimes 'big data'), partly due to sensors, the Internet, …</li>
<li>availability of hardware for high volume storage and processing, including GPUs and TPUs, cloud computing, …
<!-- The next generation of Google's Pixel phones (Pixel 6) uses TPUs:
https://blog.google/products/pixel/google-tensor-debuts-new-pixel-6-fall/ -->
</li>
</ul>
</li>
<li>We use techniques discovered by these fields for tasks in AI such as prediction (regression, classification),
clustering, speech recognition, machine translation, …
</li>
<li>But, first, some background maths!</li>
</ul>
<h1>Matrices</h1>
<ul>
<li>A <b>matrix</b> is a rectangular array, in our case of real numbers.</li>
<li>
In general, we use bold capital letters, e.g. $\v{A}$, for matrices, e.g.
$$\v{A} = \begin{bmatrix}
2 & 4 & 0 \\
1 & 3 & 2
\end{bmatrix}
$$
</li>
<li>
A matrix with $m$ rows and $n$ columns is an <b>$m \times n$ matrix</b>.
<ul>
<li>
What are $m$ and $n$ for $\v{A}$?
</li>
</ul>
$m$ and $n$ are sometimes called its <b>dimensions</b>.
</li>
<li>
We refer to an <b>element</b> of a matrix either using subscripts or indexes:
<ul>
<li>
$\v{A}_{i,j}$ or $\v{A}[i,j]$ is the element in the $i$th row and $j$th column.
</li>
<li>
We will index from 1.
<ul>
<li>
However, we will sometimes use position 0 for 'technical' purposes.
</li>
<li>
And we must be aware that Python numpy arrays and matrices are 0-indexed.
</li>
</ul>
</li>
<li>
So what are $\v{A}_{2,1}$, $\v{A}_{1,2}$, $\v{A}_{0,0}$ and $\v{A}_{3, 2}$?
</li>
</ul>
</li>
</ul>
<h1>Vectors</h1>
<ul>
<li>A <b>vector</b> is a matrix that has only one column, i.e. a $m \times 1$ matrix.</li>
<li>
A vector with $m$ rows is called a <b>$m$-dimensional</b> vector.
</li>
<li>
In general, we use bold lowercase letters for vectors, e.g.
$$\v{x} = \cv{2\\4\\3}$$
<li>
Sometimes this is called a <b>column vector</b>.
</li>
<li>
Then, by contrast, a <b>row vector</b> is a matrix that has only one row, i.e. a $1 \times n$ matrix, e.g.
$$\rv{2, 4, 3}$$
</li>
<li>
Unless stated otherwise, a vector should be assumed to be a column vector.
</li>
<li>
We can refer to an element using a single subscript, again most of the time indexed from 1.
<ul>
<li>
So what is $\v{x}_1$?
</li>
</ul>
</li>
</ul>
<h1>Vectors and Matrices in Python</h1>
<ul>
<li>Of the many ways of representing vectors and matrices in Python, we will use two:
<ul>
<li>
pandas library:
<ul>
<li>for vectors: <code>Series</code>, a kind of one-dimensional array;</li>
<li>for matrices: <code>DataFrames</code>, which are tabular data structures of rows and (named) columns.
</ul>
</li>
<li>
numpy library:
<ul>
<li>numpy arrays, which can be one-dimensional, two-dimensional, or have more dimensions.
</ul>
The scikit-learn library expects its data to arrive as numpy arrays.
</li>
</ul>
</li>
</ul>
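<p>For completeness, a minimal sketch of the pandas representations mentioned above (the column names here are arbitrary):</p>
```
import pandas as pd
# A vector as a pandas Series, and a matrix as a DataFrame with named columns
x_series = pd.Series([2, 4, 3])
A_df = pd.DataFrame({'f1': [2, 1], 'f2': [4, 3], 'f3': [0, 2]})
A_df.values   # the underlying numpy array, which scikit-learn expects
```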
<h1>Using numpy arrays</h1>
```
# Vectors
# We will use a numpy 1d array, which we can create from a list
# But, done this way, there is no way for us to distinguish between column- and row-vectors
x = np.array([2, 4, 3])
# Matrices
# We will use a numpy 2d array, which we can create from a list of lists
A = np.array([[2, 4, 0], [1, 3, 2]])
```
<p>
We can see their dimensions:
</p>
```
x.ndim
x.shape
A.ndim
A.shape
```
<p>
Note that the shape is always a tuple. Hence, x.shape is (3,), not 3.
</p>
<!--
<p>
We can make it into a nested list using the reshape method, and then its shape is (3,1):
</p>
X = x.reshape((3,1))
X
X.shape
<p>
Reshaping it to $(3, 1)$ makes it more clearly a column vector: 3 rows, 1 column.
</p>
<p>
If we had reshaped it to $(1, 3)$, then it would a be more like a row vector: 1 row, 3 columns:
</p>
X = x.reshape((1, 3))
X
X.shape
<p>
In general, we won't reshape unless necessary. For vectors, we'll just work with 1d numpy arrays.
</p>
-->
We can use the reshape method to give us the same data but with a different shape.
```
B = A.reshape((3,2))
B
B.shape
```
<h1>Transpose</h1>
<ul>
<li>The <b>transpose</b> of $m \times n$ matrix $\v{A}$, written $\v{A}^T$, is the $n \times m$ matrix in
which the first row of $\v{A}$ becomes the first column of $\v{A}^T$, the second row of $\v{A}$ becomes
the second column of $\v{A}^T$, and so on:
<ul>
<li>
$\v{A}_{i,j}^T = \v{A}_{j,i}$ for all $i,j$
</li>
</ul>
</li>
<li>
E.g.
$$\v{A} = \begin{bmatrix}
2 & 4 & 0 \\
1 & 3 & 2
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{A}^T = \begin{bmatrix}
2 & 1 \\
4 & 3 \\
0 & 2
\end{bmatrix}
$$
</li>
<li>
As a special case, if $\v{x}$ is a $m$-dimensional column vector ($m \times 1$), then $\v{x}^T$ is a
$m$-dimensional row vector ($1 \times m$), e.g.
$$\v{x} = \cv{2\\4\\3}\,\,\,\,\,\,\,\,\,\, \v{x}^T = \rv{2, 4, 3}$$
</li>
</ul>
<h2>Transpose in numpy</h2>
<ul>
<li>numpy arrays offer easy ways to compute their transpose: either the <code>transpose</code> method or
the <code>T</code> attribute:
</li>
</ul>
```
A = np.array([[2, 4, 0], [1, 3, 2]])
# Transpose as a method
A.transpose()
# Transpose as an attribute
A.T
```
<h1>Tensors</h1>
<ul>
<li>A quantity (a number), often referred to in this context as a <b>scalar</b>, has no dimensions.</li>
<li>A vector has one dimension, $m$.</li>
<li>A matrix has two dimensions, $m$ and $n$.</li>
<li>We can also have objects that have three or more dimensions.</li>
<li>We refer to all of these objects as <b>tensors</b> and we refer to the number of dimensions as the <b>rank</b> of the tensor.
<ul>
<li>A scalar is a rank 0 tensor.</li>
<li>A vector is a rank 1 tensor.</li>
<li>A matrix is a rank 2 tensor.</li>
<li>And we can have rank 3 tensors, rank 4 tensors, and so on.</li>
</ul>
</li>
    <li>Be warned that there are lots of different definitions of 'scalar', 'vector', 'dimension' and 'rank'
that you may find if you read around the subject. They may not all agree with my usage. My usage,
I believe, is consistent with the way we use these words in AI.
</li>
<li>The rest of this lecture continues to work only with scalars, vectors and matrices.</li>
</ul>
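<p>A quick illustration of rank in numpy, using <code>ndim</code>:</p>
```
s = np.array(5)                       # rank 0: a scalar
v = np.array([2, 4, 3])               # rank 1: a vector
M = np.array([[2, 4, 0], [1, 3, 2]])  # rank 2: a matrix
T = np.zeros((2, 3, 4))               # rank 3: e.g. a stack of two 3 x 4 matrices
s.ndim, v.ndim, M.ndim, T.ndim
```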
<h1>Scalar Addition and Scalar Multiplication</h1>
<ul>
<li>Scalar addition and multiplication both work <b>elementwise</b>, i.e.:
<ul>
<li>in scalar addition, we add the number to each element in the matrix;</li>
<li>in scalar multiplication, we multiply each element in the matrix by the number.</li>
</ul>
</li>
<li>E.g.
$$\v{A} =
\begin{bmatrix}
2 & 4 & 0 \\
1 & 3 & 2
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
2 + \v{A} =
\begin{bmatrix}
4 & 6 & 2 \\
3 & 5 & 4
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
2\v{A} =
\begin{bmatrix}
4 & 8 & 0 \\
2 & 6 & 4
\end{bmatrix}
$$
</li>
</ul>
<h2>Scalar Addition and Scalar Multiplication in numpy</h2>
<ul>
<li>numpy arrays enable operations like these using the normal addition, subtraction, multiplication and division
operators and without writing for loops.
</li>
</ul>
```
A = np.array([[2, 4, 0], [1, 3, 2]])
2 + A
2 * A
```
<ul>
<li>Other Python operators also work:</li>
</ul>
```
A**2
```
<h1>Matrix Addition and Hadamard Product</h1>
<ul>
<li>Matrix addition and Hadamard product require two matrices that have <em>the same dimensions</em>.</li>
<li>They are also defined elementwise: by adding or multiplying <em>corresponding</em> elements.</li>
<li>E.g.
$$
\v{A} = \begin{bmatrix}
2 & 4 & 0 \\
1 & 3 & 2
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{B} = \begin{bmatrix}
1 & 0 & 5 \\
2 & 3 & 2
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{A}+\v{B} = \begin{bmatrix}
3 & 4 & 5 \\
3 & 6 & 4
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{A}*\v{B} = \begin{bmatrix}
2 & 0 & 0 \\
2 & 9 & 4
\end{bmatrix}
$$
</li>
<li>In maths, Hadamard product is more often written with a dot ($\cdot$ or $\circ$), but we will use $\ast$.</li>
</ul>
<h2>Matrix Addition and Hadamard Product in numpy</h2>
<ul>
<li>We don't need to write any loops, just use <code>+</code> and <code>*</code>:</li>
</ul>
```
A = np.array([[2, 4, 0], [1, 3, 2]])
B = np.array([[1, 0, 5], [2, 3, 2]])
A + B
A * B
```
<h1>Matrix Multiplication</h1>
<ul>
<li>We can compute $\v{A}\v{B}$, the result of multiplying matrices $\v{A}$ and $\v{B}$, provided the number of columns
of $\v{A}$ equals the number of rows of $\v{B}$.
<ul>
<li>
If $\v{A}$ is a $m \times p$ matrix and $\v{B}$ is a $p \times n$ matrix, then we can compute $C = \v{A}\v{B}$.
</li>
<li>
$\v{C}$ will be a $m \times n$ matrix.
</li>
</ul>
</li>
<li>
$\v{C}_{i,j}$ is obtained by multiplying elements of the $i$th row of $\v{A}$ by corresponding elements
of the $j$th column of $\v{B}$ and summing:
$$\v{C}_{i,j} = \sum_{k=1}^p\v{A}_{i,k}\v{B}_{k,j}$$
</li>
<li>E.g.
$$\v{A} = \begin{bmatrix}
2 & 4 & 0 \\
1 & 3 & 2
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{B} = \begin{bmatrix}
3 & 1 & 2\\
2 & 3 & 1\\
1 & 3 & 3
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{A}\v{B} = \begin{bmatrix}
14 & 14 & 8\\
11 & 16 & 11
\end{bmatrix}
$$
</li>
  <li>Since vectors are just one-column matrices, matrix multiplication can apply, provided the dimensions are OK, e.g.
$$\v{A} = \begin{bmatrix}
2 & 4 & 0 \\
1 & 3 & 2
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{x} = \cv{2\\3\\1}\,\,\,\,\,\,\,\,\,\,
\v{y} = \cv{2\\3}\,\,\,\,\,\,\,\,\,\,
\v{A}\v{x} = \cv{16\\13}\,\,\,\,\,\,\,\,\,\,
\v{A}\v{y} \mbox{ is undefined}
$$
</li>
  <li>What about carrying out this operation if both are vectors? Well, they'd need to have the same dimension. For
example, here they are both 3-dimensional:
$$\v{x} = \cv{2\\3\\1}\,\,\,\,\,\,\,\,\,\,\v{y} = \cv{-1\\6\\4}$$
But, even if they are the same dimension, we cannot compute $\v{x}\v{y}$ because we need the number of columns
of $\v{x}$ (in this case, 1) to equal the number of rows of $\v{y}$ (in this case, 3).
</li>
<li>To get this to work, we need to use the transpose of $\v{x}$:
$$\v{x}^T = \rv{2,3,1}\,\,\,\,\,\,\,\,\,\,\v{y} = \cv{-1\\6\\4}\,\,\,\,\,\,\,\,\,\,\v{x}^T\v{y} = 20$$
The number of columns
of $\v{x}^T$ (3) is equal to the number of rows of $\v{y}$ (also 3).
Note how the result is a scalar. This operation is so common that it crops up with some other names including
the dot product or the scalar product and even the inner product (although, technically, the inner product is
        a more general concept).
</li>
</ul>
<h2>Matrix Multiplication in numpy</h2>
<ul>
<li>numpy offers <code>dot</code> as a function or method for matrix multiplication:
</ul>
```
A = np.array([[2, 4, 0], [1, 3, 2]])
B = np.array([[3, 1, 2], [2, 3, 1], [1, 3, 3]])
# Multiplication as a function
np.dot(A, B)
# Multiplication as a method
A.dot(B)
```
<ul>
  <li>Remember, matrix multiplication in numpy is done with <code>dot</code>, not <code>*</code>.</li>
<!--<li>Broadcasting does not apply to matrix multiplication, since it's not an elementwise operation</li>-->
</ul>
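<p>For 1d arrays, <code>dot</code> gives the scalar product $\v{x}^T\v{y}$ discussed above; it also handles the matrix-vector product:</p>
```
x = np.array([2, 3, 1])
y = np.array([-1, 6, 4])
# Scalar (dot) product of two vectors: 2*(-1) + 3*6 + 1*4 = 20
np.dot(x, y)
# Matrix-vector product Ax
A = np.array([[2, 4, 0], [1, 3, 2]])
A.dot(x)   # array([16, 13])
```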
<h1>Identity Matrices</h1>
<ul>
<li>The $n \times n$ <b>identity matrix</b>, $\v{I}_n$, contains zeros except for entries on the main diagonal
(from top left to bottom right):
<ul>
<li>
$\v{I}_n[i,i] = 1$ for $i = 1,\ldots,n$ and $\v{I}_n[i,j] = 0$ for $i \neq j$
</li>
</ul>
<li>E.g.:
$$\v{I}_3 = \begin{bmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{bmatrix}
$$
</li>
<li>
If $\v{A}$ is an $m \times n$ matrix then, $\v{A}\v{I}_n = \v{I}_m\v{A} = \v{A}$
</li>
</ul>
<h2>Identity Matrices in numpy</h2>
<ul>
<li>Create identity matrices using the <code>identity</code> function:</li>
</ul>
```
np.identity(3)
```
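<p>A quick check of the property above:</p>
```
A = np.array([[2, 4, 0], [1, 3, 2]])   # a 2 x 3 matrix
A.dot(np.identity(3))                  # right-multiplying by I_3 gives A back
np.identity(2).dot(A)                  # left-multiplying by I_2 also gives A back
```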
<h1>Inverses</h1>
<ul>
<li>If $\v{A}$ is a $n \times n$ matrix, then its <b>inverse</b>, $\v{A}^{-1}$ (<em>if it has one</em>) is also
a $n \times n$ matrix such that $\v{A}\v{A}^{-1} = \v{I}_n$.
</li>
<li>E.g.
$$\v{A} = \begin{bmatrix}
1 & 0 & 2 \\
2 & -1 & 3 \\
4 & 1 & 8
\end{bmatrix}\,\,\,\,\,\,\,\,\,\,
\v{A}^{-1} = \begin{bmatrix}
-11 & 2 & 2 \\
-4 & 0 & 1 \\
6 & -1 & -1
\end{bmatrix}
$$
</li>
<li>
Some $n \times n$ matrices do not have inverses, e.g.
$$\begin{bmatrix}
1 & 1 & 1 \\
1 & 1 & 1 \\
1 & 1 & 1
\end{bmatrix}$$
    In these cases you can compute a <b>pseudo-inverse</b> (which, in fact, exists even for non-square matrices), and use it
    for <em>some</em> of the same purposes instead.
</li>
</ul>
<h2>Inverses in numpy</h2>
<ul>
<li>numpy.linalg offers function <code>inv</code> for computing inverses, but also function
<code>pinv</code> for computing the Moore-Penrose pseudo-inverse:
</li>
</ul>
```
A = np.array([[1, 0, 2], [2, -1, 3], [4, 1, 8]])
npla.inv(A)
npla.pinv(A)
B = np.ones((3,3))
npla.inv(B) # raises an exception
npla.pinv(B)
```
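<p>As a sanity check, multiplying a matrix by its inverse recovers the identity, up to floating-point error:</p>
```
A = np.array([[1, 0, 2], [2, -1, 3], [4, 1, 8]])
np.round(A.dot(npla.inv(A)))   # the 3 x 3 identity matrix
```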
<h1>Some numpy Methods</h1>
<ul>
<li>numpy offers methods for calculations that, in other languages, would require you to write loops</li>
<li>E.g. <code>sum</code>, <code>mean</code>, <code>min</code>, <code>max</code>, <code>argmin</code>, <code>argmax</code>,
…
</li>
</ul>
```
x = np.array([2, 4, 3])
A = np.array([[2, 4, 0], [1, 3, 2]])
x.sum()
A.sum()
```
<h1>Some numpy Universal Functions</h1>
<ul>
<li>Consider a function such as <code>sqrt</code>.</li>
<li>In Python, <code>sqrt</code> (from the <code>math</code> library) takes in a number but can't take in a
list of numbers:
</li>
</ul>
```
sqrt(9)
sqrt([1, 4, 9]) # Raises an exception
```
<ul>
<li>But, the corresponding numpy function can apply to arrays:</li>
</ul>
```
np.sqrt(9)
x = np.array([1, 4, 9])
np.sqrt(x)
A = np.array([[1, 4, 9], [16, 25, 36], [49, 64, 81]])
np.sqrt(A)
```
<ul>
<li>The function is applied elementwise</li>
<li>In numpy, these are called 'universal functions' (or 'ufuncs')</li>
<li>Others include: <code>abs</code>, <code>exp</code>, <code>log10</code>, …</li>
</ul>
<h1>Vectorization</h1>
<ul>
<li>Algorithms that might otherwise need for-loops and indexing can often be written much more succinctly by expressing them
in terms of operators, methods and functions that work on entire arrays.
</li>
<li>More than this, if your programming language has efficient implementations of
these operators, methods and functions, the resulting programs can run much faster too.
<ul>
<li>numpy's operators, methods and functions, for example, are typically one or more orders of magnitude faster
than their pure Python equivalents (written using loops and indexing).
</li>
</ul>
So, avoid loops and indexing!
</li>
<li>Using fast array operators, methods and functions in this way is known as <b>vectorization</b>.
</li>
</ul>
```
# A pure-Python sum written with a loop, for comparison with the vectorized np.sum
def sum(L):
total = 0.0
for x in L:
total += x
return total
x = list(range(1, 201))
%timeit sum(x)
x = np.arange(1, 201)
%timeit np.sum(x)
```
<p>
    By default, <code>timeit</code> runs your code enough times to get sufficient accuracy (100000 loops in the above) and computes the average run time; it then repeats
    this six more times and reports the best of these seven average run times. It does this to try to make its measurements
    robust when other things are happening on your machine at the same time.
</p>
# Lesson 4 - Collaborative Filtering
```
from fastai.collab import *
from fastai.tabular import *
```
## Collaborative filtering example
`collab` models use data in a `DataFrame` of users, items, and ratings.
```
user, item, title = 'userId', 'movieId', 'title'
path = untar_data(URLs.ML_SAMPLE) # MovieLens dataset sample
path
ratings = pd.read_csv(path / 'ratings.csv')
ratings.head()
```
That's all we need to create and train a model:
```
data = CollabDataBunch.from_df(ratings, seed=42)
y_range = [0, 5.5]
# Create a Learner for collaborative filtering on data
learn = collab_learner(data, n_factors=50, y_range=y_range)
%%time
learn.fit_one_cycle(3, 5e-3)
```
## Movielens 100k
Let's try with the full Movielens 100k dataset, available from http://files.grouplens.org/datasets/movielens/ml-100k.zip
```
path = Path('data/ml-100k/')
!wget http://files.grouplens.org/datasets/movielens/ml-100k.zip -O data/ml-100k.zip
!unzip data/ml-100k.zip -d data
ratings = pd.read_csv(path / 'u.data', delimiter='\t', header=None,
names=[user, item, 'rating', 'timestamp'])
ratings.head()
movies = pd.read_csv(path / 'u.item', delimiter='|', encoding='latin-1', header=None,
names=[item, 'title', 'date', 'N', 'url', *[f'g{i}' for i in range(19)]])
movies.head()
len(ratings)
rating_movie = ratings.merge(movies[[item, title]])
rating_movie.head()
data = CollabDataBunch.from_df(rating_movie, seed=42, pct_val=0.1, item_name=title)
data.show_batch()
y_range = [0,5.5]
learn = collab_learner(data, n_factors=40, y_range=y_range, wd=1e-1)
learn.lr_find()
learn.recorder.plot(skip_end=15)
learn.fit_one_cycle(5, 5e-3)
learn.save('dotprod')
```
Here's [some benchmarks](https://www.librec.net/release/v1.3/example.html) on the same dataset for the popular Librec system for collaborative filtering. They show best results based on RMSE of 0.91, which corresponds to an MSE of `0.91**2 = 0.83`.
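To put this model on the same scale as that benchmark, one option is sketched below; it assumes fastai v1's `Learner.validate`, whose first return value is the validation loss (here the MSE).
```
# Validation MSE and its square root (RMSE), for comparison with Librec's ~0.83 / ~0.91
val_mse = float(learn.validate()[0])
print(f'MSE: {val_mse:.3f}  RMSE: {val_mse**0.5:.3f}')
```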
## Interpretation
### Setup
```
learn.load('dotprod');
learn.model
rating_movie.head()
g = rating_movie.groupby(title)['rating'].count()
top_movies = g.sort_values(ascending=False).index.values[:1000]
top_movies[:10]
```
### Movie bias
```
movie_bias = learn.bias(top_movies, is_item=True)
movie_bias.shape
mean_ratings = rating_movie.groupby(title)['rating'].mean()
movie_ratings = [(b, i, mean_ratings.loc[i]) for i, b in zip(top_movies, movie_bias)]
item0 = lambda o: o[0]
sorted(movie_ratings, key=item0)[:15]
sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15]
```
### Movie weights
```
movie_w = learn.weight(top_movies, is_item=True)
movie_w.shape
movie_pca = movie_w.pca(3)
movie_pca.shape
fac0, fac1, fac2 = movie_pca.t() # latent factors
movie_comp = [(f, i) for f, i in zip(fac0, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
movie_comp = [(f, i) for f,i in zip(fac1, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
idxs = np.random.choice(len(top_movies), 50, replace=False)
idxs = list(range(50))  # overrides the random sample above: just take the 50 most-rated movies
X = fac0[idxs]
Y = fac2[idxs]
plt.figure(figsize=(15,15))
plt.scatter(X, Y)
for i, x, y in zip(top_movies[idxs], X, Y):
plt.text(x, y, i, color=np.random.rand(3)*0.7, fontsize=11)
plt.show()
```
# Interpreting Bi-LSTM Sentiment Classification Models With Integrated Gradients
Interpretations of the predictions are generated and visualized using the Integrated Gradients algorithm, specifically the `IntGradNLPInterpreter` class.
```
import paddle
import numpy as np
import interpretdl as it
from interpretdl.data_processor.visualizer import VisualizationTextRecord, visualize_text
```
If you haven't done so already, first download the word dictionary that maps each word to an id.
## Bi-LSTM
### Load the dataset
We use the public Chinese sentiment analysis dataset ChnSenticorp as an example. PaddleNLP ships this dataset built in, so it can be loaded with a single call.
```
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
train_ds, dev_ds, test_ds = load_dataset(
"chnsenticorp", splits=["train", "dev", "test"])
print(train_ds.label_list)
for data in train_ds[:5]:
print(data)
```
Each example contains one review and its label, 0 or 1: 0 denotes a negative review and 1 a positive one.
Next, the input sentences still need to be preprocessed, e.g. segmented into tokens and mapped to word ids via the vocabulary, as in the short sketch below.
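A tiny tokenization sketch (this assumes `jieba` is importable, as it is later in this notebook):
```
import jieba
# Segment the first training review into tokens; each token is later mapped to an id via the vocabulary
sample = train_ds[0]["text"]
print(" / ".join(jieba.cut(sample)))
```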
### Build the vocabulary
```
# downloads the word dict to assets/
# Before training, download the vocabulary file (senta_word_dict.txt) used to build the word-to-id mapping.
!wget https://paddlenlp.bj.bcebos.com/data/senta_word_dict.txt -P assets/
from paddlenlp.data import JiebaTokenizer, Pad, Stack, Tuple, Vocab
VOCAB_PATH = "assets/senta_word_dict.txt"
vocab = Vocab.load_vocabulary(VOCAB_PATH, unk_token='[UNK]', pad_token='[PAD]')
vocab_size = len(vocab)
num_classes = len(train_ds.label_list)
pad_token_id = vocab.to_indices('[PAD]')
```
### Build the model
```
import paddle.nn as nn
import paddle.nn.functional as F
import paddlenlp
class LSTMModel(nn.Layer):
def __init__(self,
vocab_size,
num_classes,
emb_dim=128,
padding_idx=0,
lstm_hidden_size=198,
direction='forward',
lstm_layers=1,
dropout_rate=0.0,
pooling_type=None,
fc_hidden_size=96):
super().__init__()
        # First map the input word ids to word embeddings via a lookup table
self.embedder = nn.Embedding(
num_embeddings=vocab_size,
embedding_dim=emb_dim,
padding_idx=padding_idx)
        # Pass the word embeddings through the LSTMEncoder to obtain a semantic text representation
self.lstm_encoder = paddlenlp.seq2vec.LSTMEncoder(
emb_dim,
lstm_hidden_size,
num_layers=lstm_layers,
direction=direction,
dropout=dropout_rate,
pooling_type=pooling_type)
        # LSTMEncoder.get_output_dim() returns the hidden size of the encoded text representation
self.fc = nn.Linear(self.lstm_encoder.get_output_dim(), fc_hidden_size)
        # Final classifier
self.output_layer = nn.Linear(fc_hidden_size, num_classes)
def forward(self, text, seq_len):
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens, num_directions*lstm_hidden_size)
# num_directions = 2 if direction is 'bidirectional' else 1
text_repr = self.lstm_encoder(embedded_text, sequence_length=seq_len)
# Shape: (batch_size, fc_hidden_size)
fc_out = paddle.tanh(self.fc(text_repr))
# Shape: (batch_size, num_classes)
logits = self.output_layer(fc_out)
return logits
model_ = LSTMModel(
len(vocab),
len(train_ds.label_list),
direction='bidirectional',
padding_idx=vocab['[PAD]'])
model = paddle.Model(model_)
```
### Train the model
```
from functools import partial
def convert_example(example, tokenizer, is_test=False):
input_ids = tokenizer.encode(example["text"])
valid_length = np.array(len(input_ids), dtype='int64')
input_ids = np.array(input_ids, dtype='int64')
if not is_test:
label = np.array(example["label"], dtype="int64")
return input_ids, valid_length, label
else:
qid = np.array(example["qid"], dtype="int64")
return input_ids, valid_length, qid
def create_dataloader(dataset,
trans_fn=None,
mode='train',
batch_size=1,
use_gpu=False,
batchify_fn=None):
"""
    Creates dataloader.
Args:
dataset(obj:`paddle.io.Dataset`): Dataset instance.
trans_fn(obj:`callable`, optional, defaults to `None`): function to convert a data sample to input ids, etc.
mode(obj:`str`, optional, defaults to obj:`train`): If mode is 'train', it will shuffle the dataset randomly.
batch_size(obj:`int`, optional, defaults to 1): The sample number of a mini-batch.
use_gpu(obj:`bool`, optional, defaults to obj:`False`): Whether to use gpu to run.
batchify_fn(obj:`callable`, optional, defaults to `None`): function to generate mini-batch data by merging
the sample list, None for only stack each fields of sample in axis
0(same as :attr::`np.stack(..., axis=0)`).
Returns:
dataloader(obj:`paddle.io.DataLoader`): The dataloader which generates batches.
"""
if trans_fn:
dataset = dataset.map(trans_fn)
if mode == 'train' and use_gpu:
sampler = paddle.io.DistributedBatchSampler(
dataset=dataset, batch_size=batch_size, shuffle=True)
else:
shuffle = True if mode == 'train' else False
sampler = paddle.io.BatchSampler(
dataset=dataset, batch_size=batch_size, shuffle=shuffle)
dataloader = paddle.io.DataLoader(
dataset,
batch_sampler=sampler,
return_list=True,
collate_fn=batchify_fn)
return dataloader
# Reads data and generates mini-batches.
tokenizer = JiebaTokenizer(vocab)
trans_fn = partial(convert_example, tokenizer=tokenizer, is_test=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=vocab.token_to_idx.get('[PAD]', 0)), # input_ids
Stack(dtype="int64"), # seq len
Stack(dtype="int64") # label
): [data for data in fn(samples)]
train_loader = create_dataloader(
train_ds,
trans_fn=trans_fn,
batch_size=128,
mode='train',
batchify_fn=batchify_fn)
dev_loader = create_dataloader(
dev_ds,
trans_fn=trans_fn,
batch_size=128,
mode='validation',
batchify_fn=batchify_fn)
optimizer = paddle.optimizer.Adam(
parameters=model.parameters(), learning_rate=5e-4)
# Defines loss and metric.
criterion = paddle.nn.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
model.prepare(optimizer, criterion, metric)
model.fit(
train_loader,
dev_loader,
epochs=2,
save_dir='assets/checkpoint',
verbose=1
)
```
### Model prediction
Once the parameters saved during training are loaded, the model can be used for prediction: prepare some custom input data and call the `predict()` function, or call the model directly as in the sketch after the loading cell below.
```
# Load the saved model parameters
state_dict = paddle.load("assets/checkpoint/final.pdparams")
model_.set_dict(state_dict)
```
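A minimal prediction sketch; it reuses the `tokenizer` built above, and the sample review is illustrative rather than taken from the dataset (class 1 corresponds to a positive review):
```
import paddle.nn.functional as F
# Score a single custom review with the trained Bi-LSTM
text = '房间干净,服务也很好,下次还会再来。'  # illustrative review, not from the dataset
ids = paddle.to_tensor([tokenizer.encode(text)], dtype='int64')
seq_len = paddle.to_tensor([ids.shape[1]], dtype='int64')
probs = F.softmax(model_(ids, seq_len), axis=-1)
print('P(negative), P(positive):', probs.numpy()[0])
```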
## Interpreting the Bi-LSTM Text Classification Model
Initialize the `IntGradNLPInterpreter`.
```
ig = it.IntGradNLPInterpreter(model_, True)
```
Define the reviews that we want to analyze.
```
reviews = [
'这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般',
'作为老的四星酒店,房间依然很整洁,相当不错。机场接机服务很好,可以在车上办理入住手续,节省时间。'
]
import jieba
def preprocess_fn(data):
texts = []
seq_lens = []
for text in data:
tokens = " ".join(jieba.cut(text)).split(' ')
# tokens = text.split()
ids = []
unk_id = vocab.token_to_idx.get('[UNK]', None)
for token in tokens:
wid = vocab.token_to_idx.get(token, unk_id)
if wid:
ids.append(wid)
texts.append(ids)
seq_lens.append(len(ids))
pad_token_id = 0
max_seq_len = max(seq_lens)
for index, text in enumerate(texts):
seq_len = len(text)
if seq_len < max_seq_len:
padded_tokens = [pad_token_id for _ in range(max_seq_len - seq_len)]
new_text = text + padded_tokens
texts[index] = new_text
elif seq_len > max_seq_len:
new_text = text[:max_seq_len]
texts[index] = new_text
texts = paddle.to_tensor(texts)
texts.stop_gradient = False
seq_lens = paddle.to_tensor(seq_lens)
seq_lens.stop_gradient = False
return texts, seq_lens
```
The preprocessing function above converts a list of raw strings into padded model inputs.
In the cell below, we `interpret` reviews and grab weights for each token.
Since the output gradients are not grouped by reviews due to the LoDTensor inputs, we use the LoD information to group them into a list of lists.
```
pred_labels, pred_probs, avg_gradients = ig.interpret(
preprocess_fn(reviews),
return_pred=True,
visual=True)
sum_gradients = np.sum(avg_gradients, axis=-1).tolist()
new_array = []
for i in range(len(reviews)):
new_array.append(
list(zip(" ".join(jieba.cut(reviews[i])).split(' '), sum_gradients[i])))
```
For visualization purposes, the word weights in each review are normalized to better illustrate the differences between them. The results for each review are stored in a list of `VisualizationTextRecord` objects.
```
pred_labels, pred_probs, avg_gradients = ig.interpret(
preprocess_fn(reviews),
return_pred=True,
visual=True)
sum_gradients = np.sum(avg_gradients, axis=-1).tolist()
new_array = []
for i in range(len(reviews)):
new_array.append(
list(zip(" ".join(jieba.cut(reviews[i])).split(' '), sum_gradients[i])))
true_labels = [0, 1]
recs = []
for i, l in enumerate(new_array):
words = [t[0] for t in l]
word_importances = [t[1] for t in l]
word_importances = np.array(word_importances) / np.linalg.norm(
word_importances)
pred_label = pred_labels[i]
pred_prob = pred_probs[i]
true_label = true_labels[i]
interp_class = pred_label
if interp_class == 0:
word_importances = -word_importances
recs.append(
VisualizationTextRecord(words, word_importances, true_label,
pred_label, pred_prob, interp_class))
visualize_text(recs)
```
The above cell's output is similar to the following:
```
from IPython.display import Image
Image(filename='assets/int_grad_nlp_viz.png')
```
```
%matplotlib inline
```
The eigenfaces example: chaining PCA and SVMs
=============================================
The goal of this example is to show how an unsupervised method and a
supervised one can be chained for better prediction.
Here we'll take a look at a simple facial recognition example. Ideally,
we would use a dataset consisting of a subset of the `Labeled Faces in
the Wild <http://vis-www.cs.umass.edu/lfw/>`__ data that is available
with :func:`sklearn.datasets.fetch_lfw_people`.
However, that is a relatively large download (~200MB), so we will do the tutorial on a simpler, less rich dataset.
```
from sklearn import datasets
faces = datasets.fetch_olivetti_faces()
faces.data.shape
```
Let's visualize these faces to see what we're working with
```
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(8, 6))
# plot several images
for i in range(15):
ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
ax.imshow(faces.images[i], cmap=plt.cm.bone)
```
Note that these faces have already been localized and scaled to a common size.
This is an important preprocessing piece for facial recognition, and is a process that can require a large collection of training data.
This can be done in scikit-learn, but the challenge is gathering a sufficient amount of training data for the algorithm to work.
We'll perform a Support Vector classification of the images. We'll do a
typical train-test split on the images:
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(faces.data,faces.target, random_state=85)
print(X_train.shape, X_test.shape)
```
Preprocessing: Principal Component Analysis
-------------------------------------------
We can use PCA to reduce these features to a manageable size, while maintaining most of the information
in the dataset.
```
from sklearn import decomposition
pca = decomposition.PCA(n_components=150, whiten=True)
pca.fit(X_train)
```
One interesting part of PCA is that it computes the "mean" face, which
can be interesting to examine:
```
plt.imshow(pca.mean_.reshape(faces.images[0].shape),cmap=plt.cm.bone)
```
The principal components measure deviations about this mean along
orthogonal axes.
```
print(pca.components_.shape)
```
It is also interesting to visualize these principal components:
```
fig = plt.figure(figsize=(16, 6))
for i in range(30):
ax = fig.add_subplot(3, 10, i + 1, xticks=[], yticks=[])
ax.imshow(pca.components_[i].reshape(faces.images[0].shape),cmap=plt.cm.bone)
```
The components ("eigenfaces") are ordered by their importance from
top-left to bottom-right. We see that the first few components seem to
primarily take care of lighting conditions; the remaining components
pull out certain identifying features: the nose, eyes, eyebrows, etc.
With this projection computed, we can now project our original training
and test data onto the PCA basis:
```
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print(X_train_pca.shape)
print(X_test_pca.shape)
```
These projected components correspond to factors in a linear combination
of component images such that the combination approaches the original
face.
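One way to see this combination in action is to reconstruct a face from its 150 projected coefficients; `inverse_transform` maps PCA coordinates back to pixel space (a sketch reusing the `pca`, `X_train`, `X_train_pca` and `faces` objects defined above):
```
# Reconstruct the first training face from its 150 PCA coefficients
# and compare it with the original image.
reconstructed = pca.inverse_transform(X_train_pca[0].reshape(1, -1))

fig = plt.figure(figsize=(4, 2))
ax = fig.add_subplot(1, 2, 1, xticks=[], yticks=[])
ax.imshow(X_train[0].reshape(faces.images[0].shape), cmap=plt.cm.bone)
ax.set_title('original', fontsize='small')
ax = fig.add_subplot(1, 2, 2, xticks=[], yticks=[])
ax.imshow(reconstructed.reshape(faces.images[0].shape), cmap=plt.cm.bone)
ax.set_title('150 components', fontsize='small')
```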
Doing the Learning: Support Vector Machines
-------------------------------------------
Now we'll perform support-vector-machine classification on this reduced
dataset:
```
from sklearn import svm
clf = svm.SVC(C=5., gamma=0.001)
clf.fit(X_train_pca, y_train)
```
Finally, we can evaluate how well this classification did. First, we
might plot a few of the test-cases with the labels learned from the
training set:
```
import numpy as np
fig = plt.figure(figsize=(8, 6))
for i in range(15):
ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
ax.imshow(X_test[i].reshape(faces.images[0].shape),cmap=plt.cm.bone)
y_pred = clf.predict(X_test_pca[i, np.newaxis])[0]
color = ('black' if y_pred == y_test[i] else 'red')
ax.set_title(y_pred, fontsize='small', color=color)
```
The classifier is correct on an impressive number of images given the
simplicity of its learning model! Using a support vector classifier on just 150
features derived from the pixel-level data, the algorithm correctly
identifies a large number of the people in the images.
Again, we can quantify this effectiveness using one of several measures
from :mod:`sklearn.metrics`. First we can do the classification
report, which shows the precision, recall and other measures of the
"goodness" of the classification:
```
from sklearn import metrics
y_pred = clf.predict(X_test_pca)
print(metrics.classification_report(y_test, y_pred))
```
Another interesting metric is the *confusion matrix*, which indicates
how often any two items are mixed-up. The confusion matrix of a perfect
classifier would only have nonzero entries on the diagonal, with zeros
on the off-diagonal:
```
print(metrics.confusion_matrix(y_test, y_pred))
```
Pipelining
----------
Above we used PCA as a pre-processing step before applying our support
vector machine classifier. Plugging the output of one estimator directly
into the input of a second estimator is a commonly used pattern; for
this reason scikit-learn provides a ``Pipeline`` object which automates
this process. The above problem can be re-expressed as a pipeline as
follows:
```
from sklearn.pipeline import Pipeline
clf = Pipeline([('pca', decomposition.PCA(n_components=150, whiten=True)), ('svm', svm.LinearSVC(C=1.0))])
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(metrics.confusion_matrix(y_pred, y_test))
plt.show()
```
A Note on Facial Recognition
----------------------------
Here we have used PCA "eigenfaces" as a pre-processing step for facial
recognition. The reason we chose this is because PCA is a
broadly-applicable technique, which can be useful for a wide array of
data types. Research in the field of facial recognition in particular,
however, has shown that other more specific feature extraction methods
can be much more effective.
Assignment: Perform SVM with PCA operation on Breast Cancer Dataset and Iris Dataset.
|
github_jupyter
|
%matplotlib inline
from sklearn import datasets
faces = datasets.fetch_olivetti_faces()
faces.data.shape
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(8, 6))
# plot several images
for i in range(15):
ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
ax.imshow(faces.images[i], cmap=plt.cm.bone)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(faces.data,faces.target, random_state=85)
print(X_train.shape, X_test.shape)
from sklearn import decomposition
pca = decomposition.PCA(n_components=150, whiten=True)
pca.fit(X_train)
plt.imshow(pca.mean_.reshape(faces.images[0].shape),cmap=plt.cm.bone)
print(pca.components_.shape)
fig = plt.figure(figsize=(16, 6))
for i in range(30):
ax = fig.add_subplot(3, 10, i + 1, xticks=[], yticks=[])
ax.imshow(pca.components_[i].reshape(faces.images[0].shape),cmap=plt.cm.bone)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print(X_train_pca.shape)
print(X_test_pca.shape)
from sklearn import svm
clf = svm.SVC(C=5., gamma=0.001)
clf.fit(X_train_pca, y_train)
import numpy as np
fig = plt.figure(figsize=(8, 6))
for i in range(15):
ax = fig.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
ax.imshow(X_test[i].reshape(faces.images[0].shape),cmap=plt.cm.bone)
y_pred = clf.predict(X_test_pca[i, np.newaxis])[0]
color = ('black' if y_pred == y_test[i] else 'red')
ax.set_title(y_pred, fontsize='small', color=color)
from sklearn import metrics
y_pred = clf.predict(X_test_pca)
print(metrics.classification_report(y_test, y_pred))
print(metrics.confusion_matrix(y_test, y_pred))
from sklearn.pipeline import Pipeline
clf = Pipeline([('pca', decomposition.PCA(n_components=150, whiten=True)), ('svm', svm.LinearSVC(C=1.0))])
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(metrics.confusion_matrix(y_pred, y_test))
plt.show()
| 0.695958 | 0.988256 |
For each dataset, this shows everything that can be done with the none_box
```
from demo_utils.demo10 import Demo10
from demo_utils.general import SUPPORTED_DATASETS
from IPython.display import Markdown as md
from demo_utils.get_hyper_params import get_hyper_params
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
import warnings
warnings.filterwarnings('ignore')
# d10_data = {
# # 'dts_name': testing_dataset,
# 'dts_size': 1000,
# 'features_range': (500, 501),
# 'rbfsampler_gamma': 'UNUSED',
# 'nystroem_gamma': 'UNUSED',
# 'hparams': {'dt': {'max_depth': None,
# 'min_samples_split': 2,
# 'min_samples_leaf': 1,
# 'min_weight_fraction_leaf': 0.0,
# 'max_leaf_nodes': None,
# 'min_impurity_decrease': 0.0},
# 'logit': {'C': 1000.0},
# 'linear_svc': {'C': 5}}
# }
d10_data = {
# 'dts_name': testing_dataset,
'dts_size': 1000,
'features_range': (500, 501),
}
def get_a_model(model_name, sampler_name, dts_name):
box_type = 'none'
n_estim = None
# later on we will need to support different box types
# {'model_name': model_name,
# 'sampler_name': 'identity',
# 'sampler_gamma': None,
# 'model_params': {},
# # 'box_type': 'none',
# 'box_type': box_type,
# 'n_estim': None,
# 'pca': False,
# 'pca_first': False}
ret_dic = {'model_name': model_name,
# 'sampler_name': 'identity',
'sampler_name': sampler_name,
'sampler_gamma': None,
'model_params': {},
'box_type': box_type,
'n_estim': n_estim,
'pca': False,
'pca_first': False}
hyper_params = get_hyper_params(dts_name=dts_name, box_name=box_type,
model_name=model_name, sampler_name=sampler_name)
gamma = hyper_params.pop('gamma', None)
# ret_dic['sampler_gamma'] = gamma
ret_dic['gamma'] = gamma
# ret_dic['model_params'] = hyper_params
ret_dic['base_model_params'] = hyper_params
if sampler_name == 'rff':
ret_dic['sampler_name'] = 'rbf'
# elif sampler_name == 'nystroem':
# ret_dic['sampler_name'] = 'nystroem'
return ret_dic
def test_dt(d10_data):
d10 = Demo10()
new_data = dict(d10_data)
dts_name = new_data['dts_name']
model_name = 'dt'
# dt alone, dt with rff, and dt with nystroem
m1 = get_a_model(model_name=model_name, sampler_name='identity', dts_name=dts_name)
m2 = get_a_model(model_name=model_name, sampler_name='rff', dts_name=dts_name)
m3 = get_a_model(model_name=model_name, sampler_name='nystroem', dts_name=dts_name)
models = [m1, m2, m3,]
new_data['models'] = models
d10.non_interactive(**new_data)
def test_logit(d10_data):
d10 = Demo10()
new_data = dict(d10_data)
dts_name = new_data['dts_name']
model_name = 'logit'
# logit alone, logit with rff, and logit with nystroem
m1 = get_a_model(model_name=model_name, sampler_name='identity', dts_name=dts_name)
m2 = get_a_model(model_name=model_name, sampler_name='rff', dts_name=dts_name)
m3 = get_a_model(model_name=model_name, sampler_name='nystroem', dts_name=dts_name)
models = [m1, m2, m3,]
new_data['models'] = models
d10.non_interactive(**new_data)
def test_linear_svc(d10_data):
d10 = Demo10()
new_data = dict(d10_data)
dts_name = new_data['dts_name']
model_name = 'linear_svc'
# linear_svc alone, linear_svc with rff, and linear_svc with nystroem
m1 = get_a_model(model_name=model_name, sampler_name='identity', dts_name=dts_name)
m2 = get_a_model(model_name=model_name, sampler_name='rff', dts_name=dts_name)
m3 = get_a_model(model_name=model_name, sampler_name='nystroem', dts_name=dts_name)
models = [m1, m2, m3,]
new_data['models'] = models
d10.non_interactive(**new_data)
def test_dataset(d10_data, dts_name):
new_data = dict(d10_data)
new_data['dts_name'] = dts_name
display(md(f'# {dts_name}'))
test_dt(new_data)
test_logit(new_data)
test_linear_svc(new_data)
def test_everything():
for dts_name in SUPPORTED_DATASETS:
test_dataset(d10_data, dts_name=dts_name)
test_everything()
```
|
github_jupyter
|
from demo_utils.demo10 import Demo10
from demo_utils.general import SUPPORTED_DATASETS
from IPython.display import Markdown as md
from demo_utils.get_hyper_params import get_hyper_params
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
import warnings
warnings.filterwarnings('ignore')
# d10_data = {
# # 'dts_name': testing_dataset,
# 'dts_size': 1000,
# 'features_range': (500, 501),
# 'rbfsampler_gamma': 'UNUSED',
# 'nystroem_gamma': 'UNUSED',
# 'hparams': {'dt': {'max_depth': None,
# 'min_samples_split': 2,
# 'min_samples_leaf': 1,
# 'min_weight_fraction_leaf': 0.0,
# 'max_leaf_nodes': None,
# 'min_impurity_decrease': 0.0},
# 'logit': {'C': 1000.0},
# 'linear_svc': {'C': 5}}
# }
d10_data = {
# 'dts_name': testing_dataset,
'dts_size': 1000,
'features_range': (500, 501),
}
def get_a_model(model_name, sampler_name, dts_name):
box_type = 'none'
n_estim = None
# later on we will need to support different box types
# {'model_name': model_name,
# 'sampler_name': 'identity',
# 'sampler_gamma': None,
# 'model_params': {},
# # 'box_type': 'none',
# 'box_type': box_type,
# 'n_estim': None,
# 'pca': False,
# 'pca_first': False}
ret_dic = {'model_name': model_name,
# 'sampler_name': 'identity',
'sampler_name': sampler_name,
'sampler_gamma': None,
'model_params': {},
'box_type': box_type,
'n_estim': n_estim,
'pca': False,
'pca_first': False}
hyper_params = get_hyper_params(dts_name=dts_name, box_name=box_type,
model_name=model_name, sampler_name=sampler_name)
gamma = hyper_params.pop('gamma', None)
# ret_dic['sampler_gamma'] = gamma
ret_dic['gamma'] = gamma
# ret_dic['model_params'] = hyper_params
ret_dic['base_model_params'] = hyper_params
if sampler_name == 'rff':
ret_dic['sampler_name'] = 'rbf'
# elif sampler_name == 'nystroem':
# ret_dic['sampler_name'] = 'nystroem'
return ret_dic
def test_dt(d10_data):
d10 = Demo10()
new_data = dict(d10_data)
dts_name = new_data['dts_name']
model_name = 'dt'
# dt alone, dt with rff, and dt with nystroem
m1 = get_a_model(model_name=model_name, sampler_name='identity', dts_name=dts_name)
m2 = get_a_model(model_name=model_name, sampler_name='rff', dts_name=dts_name)
m3 = get_a_model(model_name=model_name, sampler_name='nystroem', dts_name=dts_name)
models = [m1, m2, m3,]
new_data['models'] = models
d10.non_interactive(**new_data)
def test_logit(d10_data):
d10 = Demo10()
new_data = dict(d10_data)
dts_name = new_data['dts_name']
model_name = 'logit'
# logit alone, logit with rff, and logit with nystroem
m1 = get_a_model(model_name=model_name, sampler_name='identity', dts_name=dts_name)
m2 = get_a_model(model_name=model_name, sampler_name='rff', dts_name=dts_name)
m3 = get_a_model(model_name=model_name, sampler_name='nystroem', dts_name=dts_name)
models = [m1, m2, m3,]
new_data['models'] = models
d10.non_interactive(**new_data)
def test_linear_svc(d10_data):
d10 = Demo10()
new_data = dict(d10_data)
dts_name = new_data['dts_name']
model_name = 'linear_svc'
# linear_svc alone, linear_svc with rff, and linear_svc with nystroem
m1 = get_a_model(model_name=model_name, sampler_name='identity', dts_name=dts_name)
m2 = get_a_model(model_name=model_name, sampler_name='rff', dts_name=dts_name)
m3 = get_a_model(model_name=model_name, sampler_name='nystroem', dts_name=dts_name)
models = [m1, m2, m3,]
new_data['models'] = models
d10.non_interactive(**new_data)
def test_dataset(d10_data, dts_name):
new_data = dict(d10_data)
new_data['dts_name'] = dts_name
display(md(f'# {dts_name}'))
test_dt(new_data)
test_logit(new_data)
test_linear_svc(new_data)
def test_everything():
for dts_name in SUPPORTED_DATASETS:
test_dataset(d10_data, dts_name=dts_name)
test_everything()
| 0.331444 | 0.489442 |
```
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
ros = RandomOverSampler(random_state=0)
rus = RandomUnderSampler(random_state=0)
Data = pd.read_csv('dataset/falldeteciton.csv')
Data.head()
Data.dropna(inplace=True)
# Activity is the Target Label
# Time does not seem to be an important feature
# Before splitting the data into train/test/validation sets we assume that the data we receive has been cleaned
# Before using the data for training purposes we must check whether the data is balanced
labelCounts = Data.groupby('ACTIVITY')['TIME'].count()
plt.bar(labelCounts.index,labelCounts.values)
plt.ylabel('Number of examples')
plt.xlabel('Activity label')
plt.show()
```
#### As we can see, the data is imbalanced, so we must resort to an oversampling or undersampling technique. In this scenario the number of examples for one of the activity labels is very low, so we choose oversampling over undersampling because we need enough data points for each label for proper training
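A minimal sketch of what random oversampling does, on toy data (not the fall-detection set):
```
# Toy illustration: RandomOverSampler duplicates minority-class rows
# until every class has as many examples as the majority class.
import numpy as np
import pandas as pd
from imblearn.over_sampling import RandomOverSampler

X_toy = np.arange(10).reshape(-1, 1)
y_toy = np.array([0] * 8 + [1] * 2)      # heavily imbalanced labels
X_bal, y_bal = RandomOverSampler(random_state=0).fit_resample(X_toy, y_toy)
print(pd.Series(y_bal).value_counts())   # both classes now have 8 examples
```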
```
def sampling(X,y,sample='oversample'):
if sample=='oversample':
X_resampled, y_resampled = ros.fit_resample(X, y)
else:
X_resampled, y_resampled = rus.fit_resample(X, y)
return X_resampled,y_resampled
features = ['SL','EEG','BP','HR','CIRCLUATION']
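# Simple outlier filter: for each feature, keep rows within one IQR beyond the 25th/75th percentiles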
for i in features:
IQR = Data[i].quantile(0.75) - Data[i].quantile(0.25)
Data = Data[(Data[i]< (Data[i].quantile(0.75)+IQR)) & (Data[i] > (Data[i].quantile(0.25)-IQR))]
Feature_Data = Data.loc[:,features]
X_resampled, Y_resampled = sampling(Feature_Data,Data['ACTIVITY'])
# test_size = 0.25 means we would want to split the data into 75% train and 25% test
X_Train, X_Test, y_train, y_test = train_test_split(X_resampled, Y_resampled,test_size=0.25)
```
### Now we can do two things
### 1. Split the training data further into train and validation sets - something we can do when we have a good number of data points
### 2. Evaluate models using the cross_val_score on the training data
##### We will evaluate only the Random Forest Classifier, based on accuracy score
#### Approach 1
```
TrainX, ValidateX, TrainY, ValidateY = train_test_split(X_Train,y_train,test_size=0.25)
rf = RandomForestClassifier()
rf.fit(TrainX,TrainY)
y_predict_valid = rf.predict(ValidateX)
accuracy_score(ValidateY,y_predict_valid)
```
#### This is the accuracy on the validation data; we would pick the model with the best score (on the chosen metric, accuracy in this case) on the validation dataset
#### There is a small problem with this approach: what if our validation sample is biased towards a certain target label?
#### To avoid this we resort to cross-validation and evaluate the model on the cross-validation score
#### Approach 2
```
# Cross Validation for Random Forest Classifier
from sklearn.model_selection import cross_val_score
rf1 = RandomForestClassifier()
scores = cross_val_score(rf1, X_Train, y_train, cv=5)
scores.mean()
```
#### The results from both approaches are roughly the same
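Worth noting: when the estimator is a classifier, `cross_val_score(..., cv=5)` already builds stratified folds, which directly addresses the label-bias concern above. Making that explicit is a one-line change (a sketch reusing `rf1`, `X_Train` and `y_train` from the cells above):
```
from sklearn.model_selection import StratifiedKFold, cross_val_score

# Explicit stratified folds: each fold preserves the label proportions of y_train
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
stratified_scores = cross_val_score(rf1, X_Train, y_train, cv=skf)
print(stratified_scores.mean())
```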
|
github_jupyter
|
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
ros = RandomOverSampler(random_state=0)
rus = RandomUnderSampler(random_state=0)
Data = pd.read_csv('dataset/falldeteciton.csv')
Data.head()
Data.dropna(inplace=True)
# Activity is the Target Label
# Time does not seem to be an important feature
# Before splitting the data into train/test/validation sets we assume that the data we receive has been cleaned
# Before using the data for training purposes we must check whether the data is balanced
labelCounts = Data.groupby('ACTIVITY')['TIME'].count()
plt.bar(labelCounts.index,labelCounts.values)
plt.ylabel('Number of examples')
plt.xlabel('Activity label')
plt.show()
def sampling(X,y,sample='oversample'):
if sample=='oversample':
X_resampled, y_resampled = ros.fit_resample(X, y)
else:
X_resampled, y_resampled = rus.fit_resample(X, y)
return X_resampled,y_resampled
features = ['SL','EEG','BP','HR','CIRCLUATION']
for i in features:
IQR = Data[i].quantile(0.75) - Data[i].quantile(0.25)
Data = Data[(Data[i]< (Data[i].quantile(0.75)+IQR)) & (Data[i] > (Data[i].quantile(0.25)-IQR))]
Feature_Data = Data.loc[:,features]
X_resampled, Y_resampled = sampling(Feature_Data,Data['ACTIVITY'])
# test_size = 0.25 means we would want to split the data into 75% train and 25% test
X_Train, X_Test, y_train, y_test = train_test_split(X_resampled, Y_resampled,test_size=0.25)
TrainX, ValidateX, TrainY, ValidateY = train_test_split(X_Train,y_train,test_size=0.25)
rf = RandomForestClassifier()
rf.fit(TrainX,TrainY)
y_predict_valid = rf.predict(ValidateX)
accuracy_score(ValidateY,y_predict_valid)
# Cross Validation for Random Forest Classifier
from sklearn.model_selection import cross_val_score
rf1 = RandomForestClassifier()
scores = cross_val_score(rf1, X_Train, y_train, cv=5)
scores.mean()
| 0.586641 | 0.869493 |
# Adding Boundary Pores
```
import numpy as np
import openpnm as op
print(op.__version__)
```
Start by creating a Delaunay network. Because it uses random base points it will better illustrate the process of adding boundary pores to arbitrary networks:
```
pn = op.network.Delaunay(num_points=200, shape=[1, 1, 0])
print(pn)
```
As can be seen in the above printout, the Delaunay class predefines many labels including boundaries and sides. In fact, as can be seen in the plot below, the Delaunay class also adds boundary pores to the topology. (Note that the Delaunay network is generated randomly, so yours will not look the same, nor have the same number of total pores and throats.) In this case, the location of the boundary pores is determined from the Voronoi cell that surrounds each Delaunay point, so the boundary cells appear to be randomly oriented relative to the internal pore they are connected with. In the example that follows, we'll remove these pores, then add boundary pores manually.
```
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, fig=fig, c='r')
fig.set_size_inches((7, 7))
```
For the purpose of this tutorial, we will trim these boundary pores from the network since we'll be adding our own.
```
op.topotools.trim(network=pn, pores=pn.pores('boundary'))
```
Plotting the network now shows the missing pores. Our goal will be to re-add boundary pores to each face.
```
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, fig=fig, c='r')
fig.set_size_inches((7, 7))
```
## Find surface pores
The ``topotools`` module in OpenPNM provides many handy helper functions for dealing with topology. We'll first use the ``find_surface_pores`` function. It works by specifying the location of a set of *marker* points outside the domain, then performing a Delaunay tessellation between these markers and the network pores. Any pores that form a simplex with the marker points are considered to be on the surface. By default OpenPNM will place *one* marker on each edge of the domain in an attempt to find all the surfaces. In our case, we will specify them manually to only find one face.
Specifying the markers can be a challenge. If we only specify a single marker, we will only find a limited number of surface pores due to the way the triangulation works.
```
markers = np.array([[-0.1, 0.5]])
op.topotools.find_surface_pores(network=pn, markers=markers, label='left_surface')
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_surface'), fig=fig, c='r')
fig.set_size_inches((7, 7))
```
As can be seen, some of the pores in deeper recesses of the surface were not found by this method. If we want to be certain of finding all the surface pores on the left side of the domain we can add more markers:
```
markers = np.array([[-0.1, 0.2], [-0.1, 0.4], [-0.1, 0.6], [-0.1, 0.8]])
op.topotools.find_surface_pores(network=pn, markers=markers, label='left_surface')
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_surface'), fig=fig, c='r')
fig.set_size_inches((7, 7))
```
Now we've captured several more pores. In some cases we may actually get more than we wanted, including some that are more correctly on the bottom of the domain. This is why finding surfaces requires a careful touch, although this problem becomes less important in domains with more pores.
## Cloning surface pores
Next we want to take the newly labeled surface pores and 'clone' them. This creates new pores in the network that are physically located in the same place as their 'parents'. They are also connected only to their 'parents' by default which is what we want, though this can be changed using the ``mode`` argument. In the following code, we tell the function to clone the 'left_surface' pores and to give them a new label of 'left_boundary'.
```
op.topotools.clone_pores(network=pn, pores=pn.pores('left_surface'), labels=['left_boundary'])
```
Now that we've cloned the pores, we need to move them. In this case we want them all to sit on the x=0 boundary face. We can do this by directly altering the 'pore.coords' array:
```
Ps = pn.pores('left_boundary')
coords = pn['pore.coords'][Ps]
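# multiplying by [0, 1, 1] zeroes the x-coordinate and leaves y and z unchanged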
coords *= [0, 1, 1]
pn['pore.coords'][Ps] = coords
```
The above code sets the x-coordinate of each of the cloned pores to 0, while leaving the other coordinates unchanged. The result is:
```
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_surface'), fig=fig, c='r')
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_boundary'), fig=fig, c='g')
fig.set_size_inches((7, 7))
```
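The same three steps (label the surface pores with markers, clone them, then move the clones onto the boundary plane) can be repeated for the remaining faces. A sketch for the right face (x = 1), using only the functions demonstrated above:
```
# Right face: place markers just outside x = 1, then clone and flatten onto x = 1
markers = np.array([[1.1, 0.2], [1.1, 0.4], [1.1, 0.6], [1.1, 0.8]])
op.topotools.find_surface_pores(network=pn, markers=markers, label='right_surface')
op.topotools.clone_pores(network=pn, pores=pn.pores('right_surface'), labels=['right_boundary'])
Ps = pn.pores('right_boundary')
coords = pn['pore.coords'][Ps]
coords[:, 0] = 1.0  # pin the x-coordinate to the right boundary plane
pn['pore.coords'][Ps] = coords
```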
|
github_jupyter
|
import numpy as np
import openpnm as op
print(op.__version__)
pn = op.network.Delaunay(num_points=200, shape=[1, 1, 0])
print(pn)
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, fig=fig, c='r')
fig.set_size_inches((7, 7))
op.topotools.trim(network=pn, pores=pn.pores('boundary'))
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, fig=fig, c='r')
fig.set_size_inches((7, 7))
markers = np.array([[-0.1, 0.5]])
op.topotools.find_surface_pores(network=pn, markers=markers, label='left_surface')
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_surface'), fig=fig, c='r')
fig.set_size_inches((7, 7))
markers = np.array([[-0.1, 0.2], [-0.1, 0.4], [-0.1, 0.6], [-0.1, 0.8]])
op.topotools.find_surface_pores(network=pn, markers=markers, label='left_surface')
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_surface'), fig=fig, c='r')
fig.set_size_inches((7, 7))
op.topotools.clone_pores(network=pn, pores=pn.pores('left_surface'), labels=['left_boundary'])
Ps = pn.pores('left_boundary')
coords = pn['pore.coords'][Ps]
coords *= [0, 1, 1]
pn['pore.coords'][Ps] = coords
fig = op.topotools.plot_connections(network=pn)
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_surface'), fig=fig, c='r')
fig = op.topotools.plot_coordinates(network=pn, pores=pn.pores('left_boundary'), fig=fig, c='g')
fig.set_size_inches((7, 7))
| 0.425009 | 0.971966 |
```
# Dataset source
# https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
# Problem statement: Predict the appliances energy use based on various features
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# Read the dataset
import pandas as pd
pd.options.display.max_columns = 1000
aep_df = pd.read_csv('energydata_complete.csv', sep=',')
print(aep_df.shape)
aep_df.head()
# Check for NAN values in the entire dataframe
aep_df.isnull().sum().sum()
# To make this notebook's output identical at every run
np.random.seed(2)
# Split the dataframe into features and labels
X = aep_df.drop(['date', 'Appliances'], axis=1).values
y = aep_df.loc[:, 'Appliances'].values
print("X shape: ", X.shape, "y shape: ", y.shape)
print("Sample X values: ", X[:5], "\n", "Sample y values: ", y[:5])
# Split the dataset into train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=2)
print(" X_train shape: ", X_train.shape,"\n", "y_train shape: ", y_train.shape,"\n",
"X_test shape: ", X_test.shape,"\n", "y_test shape: ", y_test.shape,"\n")
# Scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Model 1
# Sklearn LinearSVR model with default parameters
from sklearn.svm import LinearSVR
lin_svr = LinearSVR(random_state=2)
lin_svr.fit(X_train_scaled, y_train)
# R^2 values for train and test sets
print("Train set R^2 score: ", lin_svr.score(X_train_scaled, y_train))
print("Test set R^2 score: ", lin_svr.score(X_test_scaled, y_test))
# Mean Squared Errors of train and test sets
from sklearn.metrics import mean_squared_error
print("Train set mse: ", mean_squared_error(y_train, lin_svr.predict(X_train_scaled)))
print("Test set mse: ", mean_squared_error(y_test, lin_svr.predict(X_test_scaled)))
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, lin_svr.predict(X_train_scaled)))
print("Test set mae: ", mean_absolute_error(y_test, lin_svr.predict(X_test_scaled)))
# LinearSVR with default hyperparameters is very poor at fitting the data, we will try to increase the R^2 score by using nonlinear kernels
# Model 2
# Sklearn SVR model with rbf kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
rbf_rnd_search_cv = RandomizedSearchCV(SVR(), param_distributions, n_iter=30, n_jobs=6, verbose=5, cv=3, random_state=2)
rbf_rnd_search_cv.fit(X_train_scaled, y_train)
rbf_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", rbf_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", rbf_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# Mean Squared Errors of train and test sets
print("Train set mse: ", mean_squared_error(y_train, rbf_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mse: ", mean_squared_error(y_test, rbf_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, rbf_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mae: ", mean_absolute_error(y_test, rbf_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Model 3
# Sklearn SVR model with polynomial kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
poly_param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
poly_rnd_search_cv = RandomizedSearchCV(SVR(kernel='poly', degree=3, coef0=1), poly_param_distributions, n_iter=10, n_jobs=6, verbose=5, cv=3, random_state=2)
poly_rnd_search_cv.fit(X_train_scaled, y_train)
poly_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", poly_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", poly_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# Mean Squared Errors of train and test sets
print("Train set mse: ", mean_squared_error(y_train, poly_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mse: ", mean_squared_error(y_test, poly_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, poly_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mae: ", mean_absolute_error(y_test, poly_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Model 4
# Sklearn SVR model with 5th order polynomial kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
poly_5_param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
poly_5_rnd_search_cv = RandomizedSearchCV(SVR(kernel='poly', degree=5, coef0=1), poly_5_param_distributions, n_iter=5, n_jobs=6, verbose=5, cv=3, random_state=2)
poly_5_rnd_search_cv.fit(X_train_scaled, y_train)
poly_5_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", poly_5_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", poly_5_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# Model 5
# Sklearn SVR model with 7th order polynomial kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
poly_7_param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
poly_7_rnd_search_cv = RandomizedSearchCV(SVR(kernel='poly', degree=7, coef0=1), poly_7_param_distributions, n_iter=5, n_jobs=6, verbose=5, cv=3, random_state=2)
poly_7_rnd_search_cv.fit(X_train_scaled, y_train)
poly_7_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", poly_7_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", poly_7_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# It turns out polynomial kernel model with degree 5 is a better model than linear svr, rbf model and polynomial kernel with other degrees with the specified set of parameters
```
|
github_jupyter
|
# Dataset source
# https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
# Problem statement: Predict the appliances energy use based on various features
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# Read the dataset
import pandas as pd
pd.options.display.max_columns = 1000
aep_df = pd.read_csv('energydata_complete.csv', sep=',')
print(aep_df.shape)
aep_df.head()
# Check for NAN values in the entire dataframe
aep_df.isnull().sum().sum()
# To make this notebook's output identical at every run
np.random.seed(2)
# Split the dataframe into features and labels
X = aep_df.drop(['date', 'Appliances'], axis=1).values
y = aep_df.loc[:, 'Appliances'].values
print("X shape: ", X.shape, "y shape: ", y.shape)
print("Sample X values: ", X[:5], "\n", "Sample y values: ", y[:5])
# Split the dataset into train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=2)
print(" X_train shape: ", X_train.shape,"\n", "y_train shape: ", y_train.shape,"\n",
"X_test shape: ", X_test.shape,"\n", "y_test shape: ", y_test.shape,"\n")
# Scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Model 1
# Sklearn LinearSVR model with default parameters
from sklearn.svm import LinearSVR
lin_svr = LinearSVR(random_state=2)
lin_svr.fit(X_train_scaled, y_train)
# R^2 values for train and test sets
print("Train set R^2 score: ", lin_svr.score(X_train_scaled, y_train))
print("Test set R^2 score: ", lin_svr.score(X_test_scaled, y_test))
# Mean Squared Errors of train and test sets
from sklearn.metrics import mean_squared_error
print("Train set mse: ", mean_squared_error(y_train, lin_svr.predict(X_train_scaled)))
print("Test set mse: ", mean_squared_error(y_test, lin_svr.predict(X_test_scaled)))
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, lin_svr.predict(X_train_scaled)))
print("Test set mae: ", mean_absolute_error(y_test, lin_svr.predict(X_test_scaled)))
# LinearSVR with default hyperparameters is very poor at fitting the data, we will try to increase the R^2 score by using nonlinear kernels
# Model 2
# Sklearn SVR model with rbf kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
rbf_rnd_search_cv = RandomizedSearchCV(SVR(), param_distributions, n_iter=30, n_jobs=6, verbose=5, cv=3, random_state=2)
rbf_rnd_search_cv.fit(X_train_scaled, y_train)
rbf_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", rbf_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", rbf_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# Mean Squared Errors of train and test sets
print("Train set mse: ", mean_squared_error(y_train, rbf_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mse: ", mean_squared_error(y_test, rbf_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, rbf_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mae: ", mean_absolute_error(y_test, rbf_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Model 3
# Sklearn SVR model with polynomial kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
poly_param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
poly_rnd_search_cv = RandomizedSearchCV(SVR(kernel='poly', degree=3, coef0=1), poly_param_distributions, n_iter=10, n_jobs=6, verbose=5, cv=3, random_state=2)
poly_rnd_search_cv.fit(X_train_scaled, y_train)
poly_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", poly_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", poly_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# Mean Squared Errors of train and test sets
print("Train set mse: ", mean_squared_error(y_train, poly_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mse: ", mean_squared_error(y_test, poly_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Mean Absolute Errors of train and test sets
from sklearn.metrics import mean_absolute_error
print("Train set mae: ", mean_absolute_error(y_train, poly_rnd_search_cv.best_estimator_.predict(X_train_scaled)))
print("Test set mae: ", mean_absolute_error(y_test, poly_rnd_search_cv.best_estimator_.predict(X_test_scaled)))
# Model 4
# Sklearn SVR model with 5th order polynomial kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
poly_5_param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
poly_5_rnd_search_cv = RandomizedSearchCV(SVR(kernel='poly', degree=5, coef0=1), poly_5_param_distributions, n_iter=5, n_jobs=6, verbose=5, cv=3, random_state=2)
poly_5_rnd_search_cv.fit(X_train_scaled, y_train)
poly_5_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", poly_5_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", poly_5_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# Model 5
# Sklearn SVR model with 7th order polynomial kernel
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
poly_7_param_distributions = {"gamma": reciprocal(0.001, 1.0), "C": uniform(1, 10)}
poly_7_rnd_search_cv = RandomizedSearchCV(SVR(kernel='poly', degree=7, coef0=1), poly_7_param_distributions, n_iter=5, n_jobs=6, verbose=5, cv=3, random_state=2)
poly_7_rnd_search_cv.fit(X_train_scaled, y_train)
poly_7_rnd_search_cv.best_estimator_
# R^2 values for train and test sets
print("Train set R^2 score: ", poly_7_rnd_search_cv.best_estimator_.score(X_train_scaled, y_train))
print("Test set R^2 score: ", poly_7_rnd_search_cv.best_estimator_.score(X_test_scaled, y_test))
# It turns out polynomial kernel model with degree 5 is a better model than linear svr, rbf model and polynomial kernel with other degrees with the specified set of parameters
| 0.758868 | 0.815269 |
# Regression and the Age of the universe
## Import scientific python packages
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
## Age of the universe
The inspiration for today's exercise came from Lecture 16 of Prof. Lisa Tauxe's [Python for Earth Science Students](https://github.com/ltauxe/Python-for-Earth-Science-Students) class, and some of our materials are modified from that lecture. That whole course is open source and pretty great, if you want a deeper dive into Python programming.
Linear regression is a method for estimating the potential association between two variables. Today, we will use the retreat velocity of galaxies and supernovae as a function of their distance as our example data set. Such data underlie what has come to be known as "Hubble's Law" (same Hubble as for the Hubble telescope). Hubble published these results in 1929 [Hubble, E. P. (1929) Proc. Natl. Acad. Sci., 15, 168–173.] At the time, it was unclear whether the universe was static, expanding, or collapsing. Hubble hypothesized that if the universe were expanding, then everything in it would be moving away from us. The greater the distance between the Earth and the galaxy, the faster it must be moving. So all that had to be done was to measure the distance and velocity of distant galaxies. Easy-peasy - right?
To measure velocity, Hubble made use of the Doppler shift. To understand how this works, recall that the pitch you hear as an ambulance approaches changes. Because of the Doppler shift, the ambulance's pitch changes from high (as it approaches) to low (as it recedes). The pitch changes because the relative frequency of the sound waves changes. The frequency increases as the ambulance approaches, leading to a higher pitch, and then decreases as it moves away, resulting in a lower pitch.
Just in case you haven't had this life experience, let's listen to such a [siren](https://www.youtube.com/watch?v=imoxDcn2Sgo) here.
<img src="Images/Doppler_Effect.png" width=600>
The same principle applies to light, but rather than hear a change in frequency, we observe a shift in the wavelength (the color) emitted by the galaxy. If a star or galaxy is moving away from us, its absorption bands are shifted towards longer wavelengths - the red end of the visible spectrum. The faster the star or galaxy travels away from the observer, the greater the shift will be to the red:
<img src="Images/dopp-redshift01.jpg" width=300>
So a star (or galaxy) moving away from us will have a red shift with the wavelength being spread out.
<img src="Images/dopp-redshift02.jpg" width=300>
_[Figures from http://www.a-levelphysicstutor.com/wav-doppler.php](http://www.a-levelphysicstutor.com/wav-doppler.php)_
Hubble measured the red shift of different galaxies and converted them to velocities. He then estimated the distance to these objects, which is harder to do (and he was pretty far off).
Improving such data was a major motivation of the Hubble Space Telescope. Those data and continued improvement to approaches for estimating these distances and velocities and investigating additional types of celestial objects is a major focus of ongoing research.
## Type 1a supernovae data
Let's import data from Freedman et al. (2000) of the distance and retreat velocity of type 1a supernovae. These supernovae are described as follows in a review paper that Freedman wrote in 2010 (https://doi.org/10.1146/annurev-astro-082708-101829):
> One of the most accurate means of measuring cosmological distances out into the Hubble flow
utilizes the peak brightness of SNe Ia. The potential of supernovae for measuring distances was
clear to early researchers (e.g., Baade, Minkowski, Zwicky), but it was the Hubble diagram of
Kowal (1968) that set the modern course for this field, followed by decades of work by Sandage,
Tammann, and collaborators (e.g., Sandage & Tammann 1982, 1990; see also the review by
Branch 1998). Analysis by Pskovskii (1984), followed by Phillips (1993), established a correlation
between the magnitude of a SN Ia at peak brightness and the rate at which it declines, thus
allowing supernova luminosities to be “standardized.” This method currently probes farthest
into the unperturbed Hubble flow, and it possesses very low intrinsic scatter:
*Freedman and Madore (2010) who then go onto describe how using Cepheid variable stars (a type of pulsating star) has allowed for the distances to be better calibrated.*
> SNe Ia result from the thermonuclear runaway explosions of stars.
From observations alone, the presence of SNe Ia in elliptical galaxies suggests that they do not
come from massive stars. Many details of the explosion are not yet well understood, but the
generally accepted view is that of a carbon-oxygen, electron-degenerate, nearly-Chandrasekharmass
white dwarf orbiting in a binary system with a close companion *Freedman and Madore (2010)*
```
Supernova_data = pd.read_csv('Data/Freedman2000_Supernova1a.csv')
Supernova_data.tail()
```
The ```VCMB``` column is velocity relative to the cosmic microwave background in km s$^{-1}$. The ```D(Mpc)``` column is the distance in Mpc which is the unit typically used for these measurements. 1 Mpc = 3.09 x 10$^{19}$ km
Go ahead and double-click on this cell to see how I am getting labels that have the proper superscripts.
To create nice labels with superscripts, we can use latex formatting, which can also be done in a markdown cell. For a superscript, first we need to encase the text in dollar signs and then use the ^ symbol to make the following text a superscript. If there is more than one number in the superscript, you must enclose what you want as the superscript in curly braces.
For example, to print $10^3$, we use ```$10^3$``` and for 'per second' (s$^{-1}$): ```s$^{-1}$```
```
plt.scatter(Supernova_data[...],Supernova_data[...],color='red',label='1A Supernovae data (Freedman et al. 2000)')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.legend()
plt.show()
```
## Determining the slope of this line (the Hubble constant)
We have distance on the x-axis in megaparsecs and velocity on the y-axis in km/s. The slope of this line is the Hubble constant:
$v = H_o d$
where $v$ is velocity, $d$ is distance, and $H_o$ is the Hubble constant.
This looks a lot like the equation for a line through the data ($y=mx + b$) where $m$ is the slope and $b$ is the y-intercept. In this case, the y-intercept should be 0 or nearly so, and $m$ is $H_o$.
So how do we find the slope?
Here is where we can use linear regression to find the "best fit" line through the data. The approach is to minimize the sum of the squares of the distances (residuals) between the points and a line through them. In this illustration below, the residuals are the vertical distance between each data point and the line:
<img src="Images/Residuals_for_Linear_Regression_Fit.png" width=400>
The approach in linear regression is to find the line that minimizes the squared value of these distances all added up.
<img src="Images/RMSE1.png" width=400>
<img src="Images/RMSE2.png" width=400>
<img src="Images/RMSE3.png" width=400>
<img src="Images/RMSE4.png" width=400>
We determine the best-fit line through this least squares approach using the ```np.polyfit()``` function. A straight line is a first degree polynomial (*note that the function can fit higher order polynomials as well*).
```
np.polyfit?
```
## Fitting a line with `np.polyfit()`
`np.polyfit()` can be used to calculate best fit lines (setting the degree (```deg```) to 1), or higher order curves (setting degree to 2 or higher) returning the slope and the intercept. Let's put it to use:
```
np.polyfit(...)
```
So $H_o$, the slope of the best-fit line, is 67.5 (in the odd units of kilometers per second per megaparsec).
Let's plot the best fit line on our graph.
We can assign the best fitting slope and y-intercept from **np.polyfit( )** to a variable (**m_b**).
```
m_b= np.polyfit(Supernova_data['D(Mpc)'],Supernova_data['VCMB'],1)
print(m_b) #see if that worked
```
**m_b** seems to be an array of coefficients, where the first is the slope and the second is the y-intercept.
We can now use the function `np.polyval()` which will calculate new y values using the model of a linear fit. We can feed **m_b** into **np.polyval( )**, along with our x array to get a new set of y values which are the y values for the best-fit linear model. Then we can plot the model data as a black line along with the original data.
```
model_y_values = np.polyval(m_b,Supernova_data['D(Mpc)'])
model_y_values
plt.scatter(Supernova_data['D(Mpc)'],Supernova_data['VCMB'],
color='red',label='1A Supernovae data (Freedman et al. 2000)')
plt.scatter(Supernova_data['D(Mpc)'],model_y_values,
color='black',marker='s',label='best fit to 1A Supernovae data')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.legend()
plt.show()
plt.scatter(Supernova_data['D(Mpc)'],Supernova_data['VCMB'],
color='red',label='1A Supernovae data (Freedman et al. 2000)')
plt.plot(Supernova_data['D(Mpc)'],model_y_values,
color='black',label='best fit to 1A Supernovae data')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.legend()
plt.show()
```
## Using this linear model for prediction
What would we predict that the velocity would be for a supernova that happened to be 350 Mpc?
<font color=goldenrod>**_Code for you to write_**</font>
**Use the ```np.polyval()``` function to determine what velocity the linear model predicts at that distance.**
## Evaluating model fit
We'd also like to know how well this model fits our data (i.e. how correlated the data are). We'll use the $R^{2}$ correlation coefficient for this. $R^{2}$ is zero for uncorrelated data, and 1 for perfectly linear data (so no misfit between the model line and data). We'll use the scipy function `stats.linregress` to compute $R^{2}$.
```
from scipy import stats
```
And use it to get what is normally called the $R^2$ value, which is 1 when there is perfect agreement.
<img src="Images/Correlation_examples.svg" width=900>
> Pearson correlation coefficient between several example X,Y sets. Source: https://en.wikipedia.org/wiki/Correlation_and_dependence
```
stats.linregress(Supernova_data['D(Mpc)'],Supernova_data['VCMB'])
```
### Fitting a line with `stats.linregress`
```
slope, intercept, rvalue, pvalue, stderr = stats.linregress(Supernova_data['D(Mpc)'],Supernova_data['VCMB'])
rvalue**2
```
Not a bad fit! We can have confidence that there is a strong correlation between distance and velocity. The universe is expanding.
## Evaluating the fit by plotting residuals
To see how well the regression performs, the data scientist must measure how far off the estimates are from the actual values. These differences are called *residuals*.
$$
\mbox{residual} ~=~ \mbox{observed value} ~-~ \mbox{regression estimate}
$$
A residual is what's left over – the residue – after estimation.
Residuals are the vertical distances of the points from the regression line. There is one residual for each point in the scatter plot. The residual is the difference between the observed value of $y$ and the fitted value of $y$, so for the point $(x, y)$,
$$
\mbox{residual} ~~ = ~~ y ~-~
\mbox{fitted value of }y
~~ = ~~ y ~-~
\mbox{height of regression line at }x
$$
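A generic illustration with toy numbers (not the supernova table): the residual vector is just the observed values minus `np.polyval` of the fit evaluated at each $x$:
```
# Toy example: residual = observed y - model prediction at x
import numpy as np

x = np.array([1.0, 2.0, 3.0])
y_observed = np.array([2.0, 4.5, 5.5])
fit = np.polyfit(x, y_observed, 1)            # slope and intercept of the best-fit line
residuals = y_observed - np.polyval(fit, x)   # one residual per point
print(residuals)
```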
```
residual =
plt.scatter(Supernova_data['D(Mpc)'],residual,color='red')
plt.hlines(0,xmin=0,xmax=500)
plt.xlim(0,500)
plt.show()
```
**The residual plot of a good regression shows no pattern. The residuals look about the same, above and below the horizontal line at 0, across the range of the predictor variable.**
## Estimating the age of the universe
To calculate the age of the universe, we can use Hubble's law:
We had $v=H_o d$ as Hubble's law, and we know that distance = velocity x time, or $d=vt$. Substituting and dividing both sides by $v$, we get:
$1=H_o t$.
Solving for $t$ (the age of the universe), we get
$t=1/H_o$ (in some weird units).
```
t =
print(t)
```
But the units are weird (not years, Mpc s/km). To fix this, we need to know how many kilometers are in a megaparsec. As it happens, there are 3.09 x 10$^{19}$ km/Mpc.
So, we can calculate the age of the universe in seconds (**Age_sec**) by converting the megaparsecs to km:
Age (s) = $t \frac{s \cdot Mpc}{km}$ x $3.09 x 10^{19} \frac {km}{Mpc}$
```
Age_sec=
print(Age_sec)
```
That's a lot of seconds! We should convert seconds to years. Here's another fun fact: there are approximately $\pi$ x 10$^7$ seconds in a year.
More precisely, there are 60 (s/min) x 60 (min/hr) x 24 (hr/day) x 365.25 (days/yr)
```
s_yr=
print('%e'%(s_yr))
```
OK, so not exactly $\pi \times 10^7$, but close....
```
Age_yrs=
print(Age_yrs)
```
And now in billions of years:
```
print ('Age of the universe (in billions of years):')
print(Age_yrs*1e-9)
```
<font color=goldenrod>**_Code for you to write_**</font>
**Write a function that takes in a Hubble constant value and calculates the age of the Universe in billions of year**
```
def age_of_universe(Hubble_constant):
return age
```
## Using other data sets to estimate the Hubble constant
Determining the Hubble constant continues to be a major avenue of astrophysical research. In fact, Wendy Freedman's group just published a new study (https://arxiv.org/abs/1907.05922) that is summarized in this short video:
https://www.youtube.com/watch?v=awcnVykOKZY
From that paper here is a visualization of Hubble constant determinations over the past 18 years:
<img src="Images/Hubble_Constant_Time.png" width=600>
Let's look at another data set from the 2000 study to see how different data sets can lead to different answers.
## Tully-Fisher Relation galaxy data
> The total luminosity of a spiral galaxy (corrected to face-on inclination to account for extinction)
is strongly correlated with the galaxy’s maximum (corrected to edge-on inclination) rotation
velocity. This relation, calibrated via the Leavitt Law or TRGB, becomes a powerful means of determining
extragalactic distances (Tully&Fisher 1977, Aaronson et al. 1986, Pierce&Tully 1988,
Giovanelli et al. 1997). The TF relation at present is one of the most widely applied methods for
distance measurements *Freedman and Madore (2010)*
<font color=goldenrod>**_Code for you to write_**</font>
**Import the 'Data/Freedman2000_IBandTullyFisher.csv' file. Make a linear fit to determine the slope between `VCMB` and `D(Mpc)`. Calculate the implied age of the universe from these TF galaxy data alone.**
## Going even further out into the universe
Let's look at new data sets available for the classic Hubble problem. I found one published by Betoule et al. in 2014 [http://dx.doi.org/10.1051/0004-6361/201423413](http://dx.doi.org/10.1051/0004-6361/201423413). In this paper, data are plotted using the parameters $z$ and $\mu$ which are related to the red shift velocity and distance. $z$ is the fractional shift in the spectral wavelength and $\mu$ is related to distance.
Here is a plot from the Betoule et al. paper:
<img src="Images/betoule14.png" width=600>
_[Figure from Betoule et al., 2014.] These data are type Ia supernova from different observation collaborations_
Notice that they plotted the data on a log scale. (This hides some surprising things.)
It turns out that we have been looking at data that are low-z (that is relatively close and low red shift). We need to convert $z$ and $\mu$ to distance and velocity to compare to the results we have considered thus far.
According to [http://hyperphysics.phy-astr.gsu.edu/hbase/Astro/hubble.html](http://hyperphysics.phy-astr.gsu.edu/hbase/Astro/hubble.html)
velocity $v$ (as fraction of the speed of light, $c$) is given by
${v\over c}= \bigl({{(z+1)^2-1} \over {(z+1)^2+1}}\bigr)$
where $c=3 \times 10^8$m s$^{-1}$.
And according to the Betoule et al. (2014) paper, $\mu$ relates to distance in parsecs $d$ like this:
$\mu=5\log(d/10)$.
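Solving $\mu=5\log(d/10)$ for $d$ gives $d = 10 \times 10^{\mu/5}$ parsecs; dividing by $10^6$ then converts parsecs to Mpc. A quick sketch of that conversion (the cell further below applies the same arithmetic to the whole dataframe):
```
# Convert a distance modulus mu to distance in parsecs and then megaparsecs
def mu_to_Mpc(mu):
    d_pc = 10.0 * 10.0 ** (mu / 5.0)  # parsecs, from mu = 5*log10(d/10)
    return d_pc / 1.0e6               # 1 Mpc = 1e6 pc

print(mu_to_Mpc(35.0))                # ~100 Mpc for mu = 35
```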
Let's read in the data (available from this website: http://cdsarc.u-strasbg.fr/viz-bin/qcat?J/A+A/568/A22#sRM2.2), which are averages of the data shown in the figure above, and take a peek.
```
Betoule_data = pd.read_csv('Data/mu_z.csv',header=1)
Betoule_data.head()
```
Now we can plot it the same way as the cosmologists did in the paper, using $\mu$ and $\log z$:
```
plt.scatter(Betoule_data[...],Betoule_data[...],color='blue')
plt.xlabel('z')
plt.ylabel('$\mu$')
plt.semilogx()
plt.show()
```
To compare these new data with the previously considered data, we must do the following:
- Transform $z$ to velocity
- Transform $\mu$ to distance using the equations provided.
- Truncate the new dataset which goes to much farther distances than the 'old' data set
```
c = 2.9979e8 / 1000 # speed of light in km/s
# the formula for v from z
Betoule_data['velocity'] = c * (((Betoule_data['z']+1.)**2-1.)/((Betoule_data['z']+1.)**2+1.))
Betoule_data['distance']=10000*(10.**((Betoule_data['mu'])/5.))*1e-9 # convert mu to distance in Mpc (d = 10*10**(mu/5) pc, then pc -> Mpc)
plt.figure(figsize=(8,6))
plt.scatter(Betoule_data['distance'],Betoule_data['velocity'],
color='blue',label='1A Supernovae data (Betoule et al. 2014)')
plt.scatter(Supernova_data['D(Mpc)'],Supernova_data['VCMB'],
color='red',label='1A Supernovae data (Freedman et al. 2000)')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.legend()
plt.show()
```
These data sets are similar to one another for the "close" objects, but we can see that a linear model doesn't work well for objects that are at greater distances.
To visualize this reality, let's plot the fit to the Freedman et al. 2000 data atop this plot (applying it to the Betoule distances using `np.polyval()`).
```
model_y_values = np.polyval(m_b,Betoule_data['distance'])
plt.figure(figsize=(8,6))
plt.scatter(Betoule_data['distance'],Betoule_data['velocity'],
color='blue',label='1A Supernovae data (Betoule et al. 2014)')
plt.scatter(Supernova_data['D(Mpc)'],Supernova_data['VCMB'],
color='red',label='1A Supernovae data (Freedman et al. 2000)')
plt.plot(Betoule_data['distance'],model_y_values,
color='black',label='1A Supernovae fit to Freedman data')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.legend()
plt.show()
```
Clearly this fit is quite poor.
Let's make a first-order polynomial fit to all the Betoule data and then plot the residual:
```
fit = np.polyfit(Betoule_data['distance'],Betoule_data['velocity'],1)
y_values = np.polyval(fit,Betoule_data['distance'])
plt.subplot(2,1,1)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity'])
plt.plot(Betoule_data['distance'],y_values,color='orange',)
plt.title('data and a polynomial degree 1 fit')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.subplot(2,1,2)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity']-y_values)
plt.title('residuals of a polynomial degree 1 fit')
plt.ylabel('Residual velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.tight_layout()
plt.show()
```
There is a lot of structure to the residual of this degree 1 fit. Let's try a degree 2 polynomial fit (known as quadratic):
$f(x)=ax^2+bx+c$
```
fit = np.polyfit(Betoule_data['distance'],Betoule_data['velocity'],2)
y_values = np.polyval(fit,Betoule_data['distance'])
plt.subplot(2,1,1)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity'])
plt.plot(Betoule_data['distance'],y_values,color='orange',)
plt.title('data and a polynomial degree 2 fit')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.subplot(2,1,2)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity']-y_values)
plt.title('residuals of a polynomial degree 2 fit')
plt.ylabel('Residual velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.tight_layout()
plt.show()
```
There is a lot of structure to the residuals of this degree 2 fit (and the residuals are still high). Let's try a degree 3 polynomial fit (known as cubic):
$f(x)=ax^3+bx^2+cx+d$
```
fit = np.polyfit(Betoule_data['distance'],Betoule_data['velocity'],3)
y_values = np.polyval(fit,Betoule_data['distance'])
plt.subplot(2,1,1)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity'])
plt.plot(Betoule_data['distance'],y_values,color='orange',)
plt.title('data and a polynomial degree 3 fit')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.subplot(2,1,2)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity']-y_values)
plt.title('residuals of a polynomial degree 3 fit')
plt.ylabel('Residual velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.tight_layout()
plt.show()
```
Can a degree 4 polynomial fit do better?
$f(x)=ax^4+bx^3+cx^2+dx+e$
```
fit = np.polyfit(Betoule_data['distance'],Betoule_data['velocity'],4)
y_values = np.polyval(fit,Betoule_data['distance'])
plt.subplot(2,1,1)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity'])
plt.plot(Betoule_data['distance'],y_values,color='orange',)
plt.title('data and a polynomial degree 4 fit')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.subplot(2,1,2)
plt.scatter(Betoule_data['distance'],Betoule_data['velocity']-y_values)
plt.title('residuals of a polynomial degree 4 fit')
plt.ylabel('Residual velocity (km s$^{-1}$)')
plt.xlabel('Distance (Mpc)')
plt.tight_layout()
plt.show()
```
That looks about the same as the cubic fit, so we might as well stick with the cubic as a working model.
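To back up that visual impression, a small sketch along these lines (assuming `Betoule_data` has been built as above) compares the RMS residual of the fits of each degree; the quartic should improve only marginally on the cubic:
```
import numpy as np

# Compare the RMS residual of polynomial fits of degree 1-4 (Betoule_data as built above)
for degree in [1, 2, 3, 4]:
    coeffs = np.polyfit(Betoule_data['distance'], Betoule_data['velocity'], degree)
    predicted = np.polyval(coeffs, Betoule_data['distance'])
    rms = np.sqrt(np.mean((Betoule_data['velocity'] - predicted) ** 2))
    print(f'degree {degree}: RMS residual = {rms:.0f} km/s')
```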
That the velocity-distance relationship is not linear is taken as evidence that the expansion of the universe is accelerating. This acceleration is attributed to dark energy:
> In a matter-dominated universe, the expansion velocity of the Universe slows down over
time owing to the attractive force of gravity. However, a decade ago two independent groups (Perlmutter et al. 1999, Riess et al. 1998) found that supernovae at z ∼ 0.5 appear to be about 10%
fainter than those observed locally, consistent instead with models in which the expansion velocity
is increasing; that is, a universe that is accelerating in its expansion. Combined with independent
estimates of the matter density, these results are consistent with a universe in which one-third of
the overall density is in the form of matter (ordinary plus dark), and two-thirds is in a form having
a large, negative pressure, termed dark energy. *Freedman and Madore (2010)*
### Turn in the Notebook
**Export as HTML and upload to bCourses.**
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
Supernova_data = pd.read_csv('Data/Freedman2000_Supernova1a.csv')
Supernova_data.tail()
## Determining the slope of this line (the Hubble constant)
We have distance on the x-axis in megaparsecs and velocity on the y-axis in km/s. The slope of this line is the Hubble constant:
$v = H_o d$
where $v$ is velocity, $d$ is distance, and $H_o$ is the Hubble constant.
This looks a lot like the equation for a line through the data ($y=mx + b$) where $m$ is the slope and $b$ is the y-intercept. In this case, the y-intercept should be 0 or nearly so, and $m$ is $H_o$.
So how do we find the slope?
Here is where we can use linear regression to find the "best fit" line through the data. The approach is to minimize the sum of the squares of the distances (residuals) between the points and a line through them. In the illustration below, the residuals are the vertical distances between each data point and the line:
<img src="Images/Residuals_for_Linear_Regression_Fit.png" width=400>
The approach in linear regression is to find the line that minimizes the squared value of these distances all added up.
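To make this sum-of-squares idea concrete before the illustrations below, here is a minimal sketch; the `sum_of_squared_residuals` helper and the trial slope of 70 km/s/Mpc are purely illustrative, and `Supernova_data` is assumed to be loaded as above:
```
import numpy as np

def sum_of_squared_residuals(slope, intercept, x, y):
    """Sum of the squared vertical distances between the data and the line y = slope*x + intercept."""
    predicted = slope * x + intercept
    return np.sum((y - predicted) ** 2)

# Evaluate one candidate line through the Freedman et al. (2000) supernova data
trial_ssr = sum_of_squared_residuals(70.0, 0.0,
                                     Supernova_data['D(Mpc)'], Supernova_data['VCMB'])
print(f'Sum of squared residuals for a trial slope of 70 km/s/Mpc: {trial_ssr:.3e}')
```
Linear regression finds the slope and intercept that make this quantity as small as possible.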
<img src="Images/RMSE1.png" width=400>
<img src="Images/RMSE2.png" width=400>
<img src="Images/RMSE3.png" width=400>
<img src="Images/RMSE4.png" width=400>
We determine the best-fit line through this least squares approach using the ```np.polyfit()``` function. A straight line is a first degree polynomial (*note that the function can fit higher order polynomials as well*).
## Fitting a line with `np.polyfit()`
`np.polyfit()` can be used to calculate best-fit lines (setting the degree (```deg```) to 1) or higher-order curves (setting ```deg``` to 2 or higher); for a straight line it returns the slope and the intercept. Let's put it to use:
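A minimal sketch of that call, assuming `Supernova_data` is the dataframe loaded above (the slope should come out near the value quoted below):
```
import numpy as np

# Fit a degree-1 polynomial (a straight line) to velocity vs. distance
m_b = np.polyfit(Supernova_data['D(Mpc)'], Supernova_data['VCMB'], deg=1)
print('slope (H_o):', m_b[0], 'km/s/Mpc')
print('y-intercept:', m_b[1], 'km/s')
```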
So $H_o$, the slope of the best-fit line, is 67.5 (in the odd units of kilometers per second per megaparsec).
Let's plot the best fit line on our graph.
We can assign the best-fitting slope and y-intercept from **np.polyfit( )** to a variable (**m_b**).
**m_b** is an array of coefficients, where the first is the slope and the second is the y-intercept.
We can now use the function `np.polyval()` which will calculate new y values using the model of a linear fit. We can feed **m_b** into **np.polyval( )**, along with our x array to get a new set of y values which are the y values for the best-fit linear model. Then we can plot the model data as a black line along with the original data.
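A sketch of that step, assuming `m_b` holds the coefficients returned by `np.polyfit()` as above:
```
import numpy as np
import matplotlib.pyplot as plt

# Evaluate the linear model at (sorted) observed distances and overlay it on the data
distances = np.sort(Supernova_data['D(Mpc)'].values)
model_velocities = np.polyval(m_b, distances)

plt.figure(figsize=(8, 6))
plt.scatter(Supernova_data['D(Mpc)'], Supernova_data['VCMB'],
            color='red', label='1A Supernovae data (Freedman et al. 2000)')
plt.plot(distances, model_velocities, color='black', label='best-fit line')
plt.xlabel('Distance (Mpc)')
plt.ylabel('Velocity (km s$^{-1}$)')
plt.legend()
plt.show()
```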
## Using this linear model for prediction
What would we predict that the velocity would be for a supernova that happened to be 350 Mpc?
<font color=goldenrod>**_Code for you to write_**</font>
**Use the ```np.polyval()``` function to come up with what the linear model predicts the velocity would be?**
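One possible solution sketch, assuming `np` and `m_b` are defined in the cells above:
```
# The linear model's predicted recession velocity for a supernova at 350 Mpc
predicted_velocity = np.polyval(m_b, 350)
print(f'Predicted velocity at 350 Mpc: {predicted_velocity:.0f} km/s')
```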
## Evaluating model fit
We'd also like to know how well this model fits our data (i.e., how correlated the data are). We'll use the $R^{2}$ correlation coefficient for this. $R^{2}$ is zero for uncorrelated data, and 1 for perfectly linear data (so no misfit between the model line and data). We'll use the scipy function `stats.linregress` to compute $R^{2}$.
We can use it to get what is normally called the $R^2$ value, which, when equal to 1, represents perfect agreement.
<img src="Images/Correlation_examples.svg" width=900>
> Pearson correlation coefficient between several example X,Y sets. Source: https://en.wikipedia.org/wiki/Correlation_and_dependence
### Fitting a line with `stats.linregress`
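A minimal sketch of how `stats.linregress` could be applied here, assuming `Supernova_data` is loaded as above:
```
from scipy import stats

# linregress returns the slope, intercept, correlation coefficient (r), p-value and standard error
result = stats.linregress(Supernova_data['D(Mpc)'], Supernova_data['VCMB'])
print('slope (H_o):', result.slope, 'km/s/Mpc')
print('R-squared:', result.rvalue ** 2)
```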
Not a bad fit! We can have confidence that there is a strong correlation between distance and velocity. The universe is expanding.
## Evaluating the fit through plotting residuals
To see how well the regression performs, the data scientist must measure how far off the estimates are from the actual values. These differences are called *residuals*.
$$
\mbox{residual} ~=~ \mbox{observed value} ~-~ \mbox{regression estimate}
$$
A residual is what's left over – the residue – after estimation.
Residuals are the vertical distances of the points from the regression line. There is one residual for each point in the scatter plot. The residual is the difference between the observed value of $y$ and the fitted value of $y$, so for the point $(x, y)$,
$$
\mbox{residual} ~~ = ~~ y ~-~
\mbox{fitted value of }y
~~ = ~~ y ~-~
\mbox{height of regression line at }x
$$
**The residual plot of a good regression shows no pattern. The residuals look about the same, above and below the horizontal line at 0, across the range of the predictor variable.**
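A sketch of such a residual plot for the linear fit to the Freedman data, assuming `m_b` and `Supernova_data` are defined as above:
```
import numpy as np
import matplotlib.pyplot as plt

# Residual = observed velocity - velocity predicted by the best-fit line
residuals = Supernova_data['VCMB'] - np.polyval(m_b, Supernova_data['D(Mpc)'])

plt.figure(figsize=(8, 4))
plt.scatter(Supernova_data['D(Mpc)'], residuals, color='red')
plt.axhline(0, color='black', lw=1)
plt.xlabel('Distance (Mpc)')
plt.ylabel('Residual velocity (km s$^{-1}$)')
plt.title('Residuals of the linear fit to the Freedman data')
plt.show()
```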
## Estimating the age of the universe
To calculate the age of the universe, we can use Hubble's law:
We had $v=H_o d$ as Hubble's law, and we know that distance = velocity $\times$ time, or $d=vt$. Substituting $d=vt$ into Hubble's law gives $v=H_o v t$; dividing both sides by $v$, we get:
$1=H_o t$.
Solving for $t$ (the age of the universe), we get
$t=1/H_o$ [in some weird units.]
But the units are awkward (not years, but Mpc$\cdot$s/km). To fix this, we need to know how many kilometers are in a megaparsec. As it happens, there are $3.09 \times 10^{19}$ km/Mpc.
So, we can calculate the age of the universe in seconds (**Age_sec**) by converting the megaparsecs to km:
Age (s) = $t\,\frac{\mathrm{s \cdot Mpc}}{\mathrm{km}} \times 3.09 \times 10^{19}\,\frac{\mathrm{km}}{\mathrm{Mpc}}$
That's a lot of seconds! We should convert seconds to years. Here's another fun fact: there are approximately $\pi \times 10^{7}$ seconds in a year.
More precisely, there are 60 (s/min) x 60 (min/hr) x 24 (hr/day) x 365.25 (days/yr)
Ok. so not exactly $\pi \times 10^7$, but close....
And now in billions of years:
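A sketch of that calculation, using the $H_o \approx 67.5$ km/s/Mpc slope found above:
```
H_o = 67.5                                  # km/s per Mpc, the slope of the best-fit line above
km_per_Mpc = 3.09e19                        # kilometers in one megaparsec

Age_sec = (1.0 / H_o) * km_per_Mpc          # age of the universe in seconds
seconds_per_year = 60 * 60 * 24 * 365.25    # roughly pi x 10^7
Age_years = Age_sec / seconds_per_year
Age_Gyr = Age_years / 1e9

print(f'Age of the universe: {Age_sec:.2e} s, or about {Age_Gyr:.1f} billion years')
```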
<font color=goldenrod>**_Code for you to write_**</font>
**Write a function that takes in a Hubble constant value and calculates the age of the Universe in billions of years**
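One possible way to write such a function (a sketch, not necessarily the intended course solution):
```
def age_of_universe_Gyr(H_o):
    """Age of the universe in billions of years, for a Hubble constant given in km/s/Mpc."""
    km_per_Mpc = 3.09e19
    seconds_per_year = 60 * 60 * 24 * 365.25
    return (1.0 / H_o) * km_per_Mpc / seconds_per_year / 1e9

print(age_of_universe_Gyr(67.5))   # roughly 14.5 billion years
```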
## Using other data sets to estimate the Hubble constant
Determining the Hubble constant continues to be a major avenue of astrophysical research. In fact, Wendy Freedman's group just published a new study (https://arxiv.org/abs/1907.05922) that is summarized in this short video:
https://www.youtube.com/watch?v=awcnVykOKZY
From that paper here is a visualization of Hubble constant determinations over the past 18 years:
<img src="Images/Hubble_Constant_Time.png" width=600>
Let's look at another data set from the 2000 study to see how different data sets can lead to different answers.
## Tully-Fisher Relation galaxy data
> The total luminosity of a spiral galaxy (corrected to face-on inclination to account for extinction)
is strongly correlated with the galaxy’s maximum (corrected to edge-on inclination) rotation
velocity. This relation, calibrated via the Leavitt Law or TRGB, becomes a powerful means of determining
extragalactic distances (Tully&Fisher 1977, Aaronson et al. 1986, Pierce&Tully 1988,
Giovanelli et al. 1997). The TF relation at present is one of the most widely applied methods for
distance measurements *Freedman and Madore (2010)*
<font color=goldenrod>**_Code for you to write_**</font>
**Import the 'Data/Freedman2000_IBandTullyFisher.csv' file. Make a linear fit to determine the slope between `VCMB` and `D(Mpc)`. Calculate the implied age of the universe from these TF galaxy data alone.**
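One possible approach (a sketch; it assumes the file uses the same `D(Mpc)` and `VCMB` column names named in the exercise and reuses the hypothetical `age_of_universe_Gyr` helper sketched above):
```
import numpy as np
import pandas as pd

TF_data = pd.read_csv('Data/Freedman2000_IBandTullyFisher.csv')

# The slope of the best-fit line through the Tully-Fisher galaxies is H_o
H_o_TF, intercept_TF = np.polyfit(TF_data['D(Mpc)'], TF_data['VCMB'], deg=1)
print('H_o from the Tully-Fisher galaxies:', H_o_TF, 'km/s/Mpc')
print('Implied age of the universe:', age_of_universe_Gyr(H_o_TF), 'billion years')
```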
## Going even further out into the universe
Let's look at new data sets available for the classic Hubble problem. I found one published by Betoule et al. in 2014 [http://dx.doi.org/10.1051/0004-6361/201423413](http://dx.doi.org/10.1051/0004-6361/201423413). In this paper, data are plotted using the parameters $z$ and $\mu$ which are related to the red shift velocity and distance. $z$ is the fractional shift in the spectral wavelength and $\mu$ is related to distance.
Here is a plot from the Betoule et al. paper:
<img src="Images/betoule14.png" width=600>
_[Figure from Betoule et al., 2014.] These data are type Ia supernovae from different observing collaborations._
Notice that they plotted the data on a log scale. (This hides some surprising things.)
It turns out that we have been looking at data that are low-z (that is, relatively close and at low redshift). We need to convert $z$ to velocity and $\mu$ to distance to compare with the results we have considered thus far.
According to [http://hyperphysics.phy-astr.gsu.edu/hbase/Astro/hubble.html](http://hyperphysics.phy-astr.gsu.edu/hbase/Astro/hubble.html)
velocity $v$ (as fraction of the speed of light, $c$) is given by
${v\over c}= \bigl({{(z+1)^2-1} \over {(z+1)^2+1}}\bigr)$
where $c=3 \times 10^8$m s$^{-1}$.
And according to the Betoule et al. (2014) paper, $\mu$ relates to distance in parsecs $d$ like this:
$\mu=5\log(d/10)$.
Let's read in the data (available from this website: http://cdsarc.u-strasbg.fr/viz-bin/qcat?J/A+A/568/A22#sRM2.2), which are averages of the data shown in the figure above, and take a peek.
Now we can plot it the same way as the cosmologists did in the paper, using $\mu$ and $\log z$:
To compare these new data with the previously considered data, we must do the following:
- Transform $z$ to velocity
- Transform $\mu$ to distance using the equations provided.
- Truncate the new dataset which goes to much farther distances than the 'old' data set
These data sets are similar to one another for the "close" objects, but we can see that a linear model doesn't work well for objects that are at greater distances.
To visualize this, let's plot the fit to the Freedman et al. 2000 data atop this plot (applying it to the Betoule distances using `np.polyval()`).
Clearly this fit is quite poor.
Let's make a first-order polynomial fit to all the Betoule data and then plot the residual:
There is a lot of structure to the residual of this degree 1 fit. Let's try a degree 2 polynomial fit (known as quadratic):
$f(x)=ax^2+bx+c$
There is a lot of structure to the residuals of this degree 2 fit (and the residuals are still high). Let's try a degree 3 polynomial fit (known as cubic):
$f(x)=ax^3+bx^2+cx+d$
Can a degree 4 polynomial fit do better?
$f(x)=ax^4+bx^3+cx^2+dx+e$
| 0.861188 | 0.991668 |
# Multi-Swarm Optimization Applied to Dynamic Software Project Scheduling
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dspsp_analysis import (algorithm_dataset, instance_dataset, comparison_dataset,
max_event, metrics_dataset, mean_hypervolume)
%matplotlib inline
data = metrics_dataset()
instance_ids = list(data.instance.unique())
algorithm_ids = list(data.algorithm.unique())
instance_datasets = {}
for inst in instance_ids:
ds = instance_dataset(data, inst, zero_values=True)
instance_datasets[inst] = {alg: algorithm_dataset(ds, alg, zero_values=True) for alg in algorithm_ids}
comparison_datasets = {}
for inst in instance_ids:
comparison_datasets[inst] = {alg: comparison_dataset(instance_datasets[inst][alg]) for alg in algorithm_ids}
for inst in instance_ids:
min_event = min([max_event(ds) for ds in comparison_datasets[inst].values()])
for k in comparison_datasets[inst].keys():
comparison_datasets[inst][k] = comparison_datasets[inst][k][:min_event+1]
mean_hypervolume(data)
mean_hypervolume(data).index
comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].tail()
comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].tail()
comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].tail()
```
Generating the comparison plots
```
plt.title("NSGA-II x NSGA-II Dinamico")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].hypervolume, "-ro", markersize=3, lw=.5, label="NSGAII")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].hypervolume, "-go", markersize=3, lw=.5, label="NSGAIIDyn")
plt.legend()
plt.show()
plt.title("SMPSO x SMPSO Dinamico")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
# Reuse comparison_datasets for the same instance as the previous plot;
# the "SMPSODynamic" key is assumed by analogy with "NSGAIIDynamic".
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].hypervolume, "-ro", markersize=3, lw=.5, label="SMPSO")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].hypervolume, "-go", markersize=3, lw=.5, label="SMPSODyn")
plt.legend()
plt.show()
plt.title("NSGA-II x SMPSO")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].hypervolume, "-ro", markersize=3, lw=.5, label="NSGAII")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].hypervolume, "-go", markersize=3, lw=.5, label="SMPSO")
plt.legend()
plt.show()
plt.title("NSGA-II Dinamico x SMPSO Dinamico")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].hypervolume, "-ro", markersize=3, lw=.5, label="NSGAIIDyn")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].hypervolume, "-go", markersize=3, lw=.5, label="SMPSODyn")  # "SMPSODynamic" key assumed
plt.legend()
plt.show()
```
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from dspsp_analysis import (algorithm_dataset, instance_dataset, comparison_dataset,
max_event, metrics_dataset, mean_hypervolume)
%matplotlib inline
data = metrics_dataset()
instance_ids = list(data.instance.unique())
algorithm_ids = list(data.algorithm.unique())
instance_datasets = {}
for inst in instance_ids:
ds = instance_dataset(data, inst, zero_values=True)
instance_datasets[inst] = {alg: algorithm_dataset(ds, alg, zero_values=True) for alg in algorithm_ids}
comparison_datasets = {}
for inst in instance_ids:
comparison_datasets[inst] = {alg: comparison_dataset(instance_datasets[inst][alg]) for alg in algorithm_ids}
for inst in instance_ids:
min_event = min([max_event(ds) for ds in comparison_datasets[inst].values()])
for k in comparison_datasets[inst].keys():
comparison_datasets[inst][k] = comparison_datasets[inst][k][:min_event+1]
mean_hypervolume(data)
mean_hypervolume(data).index
comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].tail()
comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].tail()
comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].tail()
plt.title("NSGA-II x NSGA-II Dinamico")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].hypervolume, "-ro", markersize=3, lw=.5, label="NSGAII")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].hypervolume, "-go", markersize=3, lw=.5, label="NSGAIIDyn")
plt.legend()
plt.show()
plt.title("SMPSO x SMPSO Dinamico")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
# Reuse comparison_datasets for the same instance as the previous plot;
# the "SMPSODynamic" key is assumed by analogy with "NSGAIIDynamic".
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].hypervolume, "-ro", markersize=3, lw=.5, label="SMPSO")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].hypervolume, "-go", markersize=3, lw=.5, label="SMPSODyn")
plt.legend()
plt.show()
plt.title("NSGA-II x SMPSO")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAII"].hypervolume, "-ro", markersize=3, lw=.5, label="NSGAII")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSO"].hypervolume, "-go", markersize=3, lw=.5, label="SMPSO")
plt.legend()
plt.show()
plt.title("NSGA-II Dinamico x SMPSO Dinamico")
plt.xlabel("Eventos dinâmicos (pontos de reescalonamento)")
plt.ylabel("Hipervolume")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["NSGAIIDynamic"].hypervolume, "-ro", markersize=3, lw=.5, label="NSGAIIDyn")
plt.plot(comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].event, comparison_datasets["ST10_DT10_E5_SK4-5"]["SMPSODynamic"].hypervolume, "-go", markersize=3, lw=.5, label="SMPSODyn")  # "SMPSODynamic" key assumed
plt.legend()
plt.show()
| 0.556641 | 0.828696 |